Dataset schema (one row per source file):
- relative_path: string, 812 distinct values
- section: string, 339 distinct values
- filename: string, length 2 to 61 characters
- text: string, length 6 to 1.76M characters
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer
optimizer
AdamW
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

_target_: torch.optim.AdamW
lr: 0.001
betas: [0.9, 0.999]
eps: 1e-8
weight_decay: 0.0
amsgrad: False
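The config above follows Hydra's `_target_` convention, so the trainer can build the optimizer without hard-coding the class. Below is a minimal sketch of that pattern, assuming hydra-core and PyTorch are installed; the linear model is a stand-in and not part of the platform.

```python
# Sketch: turning a _target_-style config into an optimizer via hydra.utils.instantiate.
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "_target_": "torch.optim.AdamW",
    "lr": 0.001,
    "betas": [0.9, 0.999],
    "eps": 1e-8,
    "weight_decay": 0.0,
    "amsgrad": False,
})

model = torch.nn.Linear(16, 1)  # stand-in model
# instantiate() imports torch.optim.AdamW and passes the remaining keys as kwargs;
# the parameter list is supplied at call time.
optimizer = instantiate(cfg, params=model.parameters())
print(optimizer)
```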
PyTorch/DrugDiscovery/MoFlow/scripts
scripts
data_preprocess
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 Chengxi Zang # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import os import pandas as pd import argparse import time from moflow.config import CONFIGS from moflow.data.data_frame_parser import DataFrameParser from moflow.data.encoding import MolEncoder def parse_args(): parser = argparse.ArgumentParser(description='') parser.add_argument('--data_name', type=str, choices=list(CONFIGS), help='dataset to be downloaded') parser.add_argument('--data_dir', type=str, default='/data') args = parser.parse_args() return args def main(args): start_time = time.time() args = parse_args() print('args', vars(args)) assert args.data_name in CONFIGS dataset_config = CONFIGS[args.data_name].dataset_config preprocessor = MolEncoder(out_size=dataset_config.max_num_atoms) input_path = os.path.join(args.data_dir, dataset_config.csv_file) output_path = os.path.join(args.data_dir, dataset_config.dataset_file) print(f'Preprocessing {args.data_name} data:') df = pd.read_csv(input_path, index_col=0) parser = DataFrameParser(preprocessor, labels=dataset_config.labels, smiles_col=dataset_config.smiles_col) dataset = parser.parse(df) dataset.save(output_path) print('Total time:', time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time))) if __name__ == '__main__': args = parse_args() main(args)
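The script above expects a CSV whose index column was written by pandas and whose columns include a SMILES string plus the label columns named in `CONFIGS[data_name].dataset_config`. The snippet below only illustrates that layout; the column names `smiles` and `qed` are assumptions, not the real dataset schema.

```python
# Illustrative only: a tiny stand-in for the kind of CSV the preprocessing reads.
import pandas as pd

df = pd.DataFrame({"smiles": ["CCO", "c1ccccc1"], "qed": [0.41, 0.44]})
df.to_csv("/tmp/toy_molecules.csv")

# The script restores the index column exactly like this:
restored = pd.read_csv("/tmp/toy_molecules.csv", index_col=0)
print(restored.head())
```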
PyTorch/SpeechRecognition/QuartzNet/scripts
scripts
inference
#!/bin/bash # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. : ${DATA_DIR:=${1:-"/datasets/LibriSpeech"}} : ${MODEL_CONFIG:=${2:-"configs/quartznet15x5_speedp-online-1.15_speca.yaml"}} : ${OUTPUT_DIR:=${3:-"/results"}} : ${CHECKPOINT:=${4:-"pretrained_models/quartznet_en/nvidia_quartznet_210504.pt"}} : ${DATASET:="test-other"} : ${LOG_FILE:=""} : ${CUDNN_BENCHMARK:=false} : ${MAX_DURATION:=""} : ${PAD_TO_MAX_DURATION:=false} : ${NUM_GPUS:=1} : ${NUM_STEPS:=0} : ${NUM_WARMUP_STEPS:=0} : ${AMP:=false} : ${BATCH_SIZE:=64} : ${EMA:=true} : ${SEED:=0} : ${DALI_DEVICE:="gpu"} : ${CPU:=false} : ${LOGITS_FILE:=} : ${PREDICTION_FILE:="${OUTPUT_DIR}/${DATASET}.predictions"} mkdir -p "$OUTPUT_DIR" ARGS="--dataset_dir=$DATA_DIR" ARGS+=" --val_manifest=$DATA_DIR/librispeech-${DATASET}-wav.json" ARGS+=" --model_config=$MODEL_CONFIG" ARGS+=" --output_dir=$OUTPUT_DIR" ARGS+=" --batch_size=$BATCH_SIZE" ARGS+=" --seed=$SEED" ARGS+=" --dali_device=$DALI_DEVICE" ARGS+=" --steps $NUM_STEPS" ARGS+=" --warmup_steps $NUM_WARMUP_STEPS" [ "$AMP" = true ] && ARGS+=" --amp" [ "$EMA" = true ] && ARGS+=" --ema" [ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark" [ -n "$CHECKPOINT" ] && ARGS+=" --ckpt=${CHECKPOINT}" [ -n "$LOG_FILE" ] && ARGS+=" --log_file $LOG_FILE" [ -n "$PREDICTION_FILE" ] && ARGS+=" --save_prediction $PREDICTION_FILE" [ -n "$LOGITS_FILE" ] && ARGS+=" --logits_save_to $LOGITS_FILE" [ "$CPU" == "true" ] && ARGS+=" --cpu" [ -n "$MAX_DURATION" ] && ARGS+=" --override_config input_val.audio_dataset.max_duration=$MAX_DURATION" \ ARGS+=" --override_config input_val.filterbank_features.max_duration=$MAX_DURATION" [ "$PAD_TO_MAX_DURATION" = true ] && ARGS+=" --override_config input_val.audio_dataset.pad_to_max_duration=True" \ ARGS+=" --override_config input_val.filterbank_features.pad_to_max_duration=True" python -m torch.distributed.launch --nproc_per_node=$NUM_GPUS inference.py $ARGS
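Every setting in the script above is an environment variable with a default (the `: ${VAR:=default}` pattern), so runs are customized by exporting variables rather than editing the file. A hedged sketch of driving it from Python; the path and values are placeholders.

```python
import os
import subprocess

# Same knobs the script declares with ": ${VAR:=...}"; exported values win over defaults.
env = dict(os.environ,
           DATA_DIR="/datasets/LibriSpeech",  # placeholder path
           DATASET="test-clean",
           AMP="true",
           BATCH_SIZE="32")
subprocess.run(["bash", "scripts/inference.sh"], env=env, check=True)
```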
TensorFlow2/Recommendation/DLRM_and_DCNv2
DLRM_and_DCNv2
requirements
git+https://github.com/NVIDIA/dllogger#egg=dllogger
absl-py>=0.7.0
pyarrow
pandas
joblib
tqdm
pyyaml
onnxruntime
git+https://github.com/onnx/tensorflow-onnx
numpy<1.24
tabulate>=0.8.7
natsort>=7.0.0
PyTorch/SpeechSynthesis/FastPitch/fastpitch
fastpitch
transformer
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from common.utils import mask_from_lens class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1), torch.unsqueeze(self.inv_freq, 0)) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1) if bsz is not None: return pos_emb[None, :, :].expand(bsz, -1, -1) else: return pos_emb[None, :, :] class PositionwiseConvFF(nn.Module): def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False): super(PositionwiseConvFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)), nn.ReLU(), # nn.Dropout(dropout), # worse convergence nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): return self._forward(inp) def _forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(self.layer_norm(core_out).to(inp.dtype)) core_out = core_out.transpose(1, 2) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(core_out) core_out = core_out.transpose(1, 2) # residual connection + layer normalization output = self.layer_norm(inp + core_out).to(inp.dtype) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def forward(self, inp, attn_mask=None): return self._forward(inp, attn_mask) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: # layer normalization inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = 
torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view( inp.size(0), inp.size(1), n_head * d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = residual + attn_out else: # residual connection + layer normalization output = self.layer_norm(residual + attn_out) output = output.to(attn_out.dtype) return output class TransformerLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout, **kwargs): super(TransformerLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, mask=None): output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2)) output *= mask output = self.pos_ff(output) output *= mask return output class FFTransformer(nn.Module): def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt, dropemb=0.0, embed_input=True, n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False): super(FFTransformer, self).__init__() self.d_model = d_model self.n_head = n_head self.d_head = d_head self.padding_idx = padding_idx if embed_input: self.word_emb = nn.Embedding(n_embed, d_embed or d_model, padding_idx=self.padding_idx) else: self.word_emb = None self.pos_emb = PositionalEmbedding(self.d_model) self.drop = nn.Dropout(dropemb) self.layers = nn.ModuleList() for _ in range(n_layer): self.layers.append( TransformerLayer( n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm) ) def forward(self, dec_inp, seq_lens=None, conditioning=0): if self.word_emb is None: inp = dec_inp mask = mask_from_lens(seq_lens).unsqueeze(2) else: inp = self.word_emb(dec_inp) # [bsz x L x 1] mask = (dec_inp != self.padding_idx).unsqueeze(2) pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype) pos_emb = self.pos_emb(pos_seq) * mask out = self.drop(inp + pos_emb + conditioning) for layer in self.layers: out = layer(out, mask=mask) # out = self.drop(out) return out, mask
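A hedged usage sketch for the `FFTransformer` defined above, assuming the FastPitch repository root is on `PYTHONPATH` so `fastpitch.transformer` and its `common.utils` dependency import cleanly; the layer sizes and sequence length are arbitrary.

```python
import torch
from fastpitch.transformer import FFTransformer

encoder = FFTransformer(
    n_layer=2, n_head=1, d_model=64, d_head=64, d_inner=256,
    kernel_size=3, dropout=0.1, dropatt=0.1,
    embed_input=True, n_embed=148, padding_idx=0)

tokens = torch.randint(1, 148, (2, 37))   # [batch, text length]
out, mask = encoder(tokens)
print(out.shape, mask.shape)              # (2, 37, 64) and (2, 37, 1)
```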
TensorFlow/Segmentation/UNet_3D_Medical
UNet_3D_Medical
requirements
nibabel
PyTorch/SpeechRecognition/Jasper/common
common
filter_warnings
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Mutes known and unrelated PyTorch warnings.

The warnings module keeps a list of filters. Importing it as late as possible
prevents its filters from being overridden.
"""

import warnings

# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
    "ignore",
    message='positional arguments and argument "destination" are deprecated.'
            ' nn.Module.state_dict will not accept them in the future.')

# 22.08-py3 container
warnings.filterwarnings(
    "ignore",
    message="is_namedtuple is deprecated, please use the python checks")
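The module above works purely through import-time side effects, so it is meant to be imported right before the noisy code runs. A small sketch of that behaviour, assuming the Jasper repository root is on `PYTHONPATH`:

```python
import warnings

import common.filter_warnings  # noqa: F401  (importing it registers the ignore filters)

with warnings.catch_warnings(record=True) as caught:
    # Matches one of the registered message patterns, so it is suppressed.
    warnings.warn("is_namedtuple is deprecated, please use the python checks")
print(len(caught))  # 0
```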
TensorFlow2/Detection/Efficientdet/model
model
iou_utils
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """IoU utils for box regression with iou losses. Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression. https://arxiv.org/pdf/1911.08287.pdf """ import math from typing import Union, Text import numpy as np import tensorflow as tf FloatType = Union[tf.Tensor, float, np.float32, np.float64] def _get_v(b1_height: FloatType, b1_width: FloatType, b2_height: FloatType, b2_width: FloatType) -> tf.Tensor: """Get the consistency measurement of aspect ratio for ciou.""" @tf.custom_gradient def _get_grad_v(height, width): """backpropogate gradient.""" arctan = tf.atan(tf.math.divide_no_nan(b1_width, b1_height)) - tf.atan( tf.math.divide_no_nan(width, height)) v = 4 * ((arctan / math.pi)**2) def _grad_v(dv): """Grad for eager mode.""" gdw = dv * 8 * arctan * height / (math.pi**2) gdh = -dv * 8 * arctan * width / (math.pi**2) return [gdh, gdw] def _grad_v_graph(dv, variables): """Grad for graph mode.""" gdw = dv * 8 * arctan * height / (math.pi**2) gdh = -dv * 8 * arctan * width / (math.pi**2) return [gdh, gdw], tf.gradients(v, variables, grad_ys=dv) if tf.compat.v1.executing_eagerly_outside_functions(): return v, _grad_v return v, _grad_v_graph return _get_grad_v(b2_height, b2_width) def _iou_per_anchor(pred_boxes: FloatType, target_boxes: FloatType, iou_type: Text = 'iou') -> tf.Tensor: """Computing the IoU for a single anchor. Args: pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max]. target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max]. iou_type: one of ['iou', 'ciou', 'diou', 'giou']. Returns: IoU loss float `Tensor`. """ # t_ denotes target boxes and p_ denotes predicted boxes. t_ymin, t_xmin, t_ymax, t_xmax = target_boxes p_ymin, p_xmin, p_ymax, p_xmax = pred_boxes zero = tf.convert_to_tensor(0.0, t_ymin.dtype) p_width = tf.maximum(zero, p_xmax - p_xmin) p_height = tf.maximum(zero, p_ymax - p_ymin) t_width = tf.maximum(zero, t_xmax - t_xmin) t_height = tf.maximum(zero, t_ymax - t_ymin) p_area = p_width * p_height t_area = t_width * t_height intersect_ymin = tf.maximum(p_ymin, t_ymin) intersect_xmin = tf.maximum(p_xmin, t_xmin) intersect_ymax = tf.minimum(p_ymax, t_ymax) intersect_xmax = tf.minimum(p_xmax, t_xmax) intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin) intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin) intersect_area = intersect_width * intersect_height union_area = p_area + t_area - intersect_area iou_v = tf.math.divide_no_nan(intersect_area, union_area) if iou_type == 'iou': return iou_v # iou is the simplest form. enclose_ymin = tf.minimum(p_ymin, t_ymin) enclose_xmin = tf.minimum(p_xmin, t_xmin) enclose_ymax = tf.maximum(p_ymax, t_ymax) enclose_xmax = tf.maximum(p_xmax, t_xmax) assert iou_type in ('giou', 'diou', 'ciou') if iou_type == 'giou': # giou is the generalized iou. 
enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin) enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin) enclose_area = enclose_width * enclose_height giou_v = iou_v - tf.math.divide_no_nan( (enclose_area - union_area), enclose_area) return giou_v assert iou_type in ('diou', 'ciou') p_center = tf.stack([(p_ymin + p_ymax) / 2, (p_xmin + p_xmax) / 2], axis=-1) t_center = tf.stack([(t_ymin + t_ymax) / 2, (t_xmin + t_xmax) / 2], axis=-1) euclidean = tf.linalg.norm(t_center - p_center, axis=-1) diag_length = tf.linalg.norm( tf.stack([enclose_ymax - enclose_ymin, enclose_xmax - enclose_xmin], axis=-1), axis=-1) diou_v = iou_v - tf.math.divide_no_nan(euclidean**2, diag_length**2) if iou_type == 'diou': # diou is the distance iou. return diou_v assert iou_type == 'ciou' v = _get_v(p_height, p_width, t_height, t_width) alpha = tf.math.divide_no_nan(v, ((1 - iou_v) + v)) return diou_v - alpha * v # the last one is ciou. def iou_loss(pred_boxes: FloatType, target_boxes: FloatType, iou_type: Text = 'iou') -> tf.Tensor: """A unified interface for computing various IoU losses. Let B and B_gt denotes the pred_box and B_gt is the target box (ground truth): IoU = |B & B_gt| / |B | B_gt| GIoU = IoU - |C - B U B_gt| / C, where C is the smallest box covering B and B_gt. DIoU = IoU - E(B, B_gt)^2 / c^2, E is the Euclidean distance of the center points of B and B_gt, and c is the diagonal length of the smallest box covering the two boxes CIoU = IoU - DIoU - a * v, where a is a positive trade-off parameter, and v measures the consistency of aspect ratio: v = (arctan(w_gt / h_gt) - arctan(w / h)) * 4 / pi^2 where (w_gt, h_gt) and (w, h) are the width and height of the target and predicted box respectively. The returned loss is computed as 1 - one of {IoU, GIoU, DIoU, CIoU}. Args: pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max]*. It can be multiple anchors, with each anchor box has four coordinates. target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max]*. It can be multiple anchors, with each anchor box has four coordinates. iou_type: one of ['iou', 'ciou', 'diou', 'giou']. Returns: IoU loss float `Tensor`. """ if iou_type not in ('iou', 'ciou', 'diou', 'giou'): raise ValueError( 'Unknown loss_type {}, not iou/ciou/diou/giou'.format(iou_type)) pred_boxes = tf.convert_to_tensor(pred_boxes, tf.float32) target_boxes = tf.cast(target_boxes, pred_boxes.dtype) # t_ denotes target boxes and p_ denotes predicted boxes: (y, x, y_max, x_max) pred_boxes_list = tf.unstack(pred_boxes, None, axis=-1) target_boxes_list = tf.unstack(target_boxes, None, axis=-1) assert len(pred_boxes_list) == len(target_boxes_list) assert len(pred_boxes_list) % 4 == 0 iou_loss_list = [] for i in range(0, len(pred_boxes_list), 4): pred_boxes = pred_boxes_list[i:i + 4] target_boxes = target_boxes_list[i:i + 4] # Compute mask. t_ymin, t_xmin, t_ymax, t_xmax = target_boxes mask = tf.math.logical_and(t_ymax > t_ymin, t_xmax > t_xmin) mask = tf.cast(mask, t_ymin.dtype) # Loss should be mask * (1 - iou) = mask - masked_iou. pred_boxes = [b * mask for b in pred_boxes] target_boxes = [b * mask for b in target_boxes] iou_loss_list.append( mask * (1 - tf.squeeze(_iou_per_anchor(pred_boxes, target_boxes, iou_type)))) if len(iou_loss_list) == 1: return iou_loss_list[0] return tf.reduce_sum(tf.stack(iou_loss_list), 0)
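A hedged usage sketch for `iou_loss` above; it assumes the EfficientDet `model/` directory is importable, boxes are given as `[y_min, x_min, y_max, x_max]`, and the per-anchor loss is `1 -` one of {IoU, GIoU, DIoU, CIoU}.

```python
import tensorflow as tf

from model import iou_utils  # assumes the Efficientdet directory is on PYTHONPATH

pred = tf.constant([[0.1, 0.1, 0.6, 0.6]])     # [y_min, x_min, y_max, x_max]
target = tf.constant([[0.0, 0.0, 0.5, 0.5]])

for iou_type in ("iou", "giou", "diou", "ciou"):
    loss = iou_utils.iou_loss(pred, target, iou_type)
    print(iou_type, float(tf.reduce_mean(loss)))
```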
TensorFlow/Recommendation/WideAndDeep/preproc
preproc
preproc2
#!/usr/bin/env python # coding: utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import math import pickle import pyspark.sql.functions as F import time from collections import defaultdict from pyspark.context import SparkContext, SparkConf from pyspark.sql.session import SparkSession from pyspark.sql.types import IntegerType, StringType, StructType, StructField, TimestampType, FloatType, ArrayType, \ MapType OUTPUT_BUCKET_FOLDER = "/outbrain/preprocessed/" DATA_BUCKET_FOLDER = "/outbrain/orig/" SPARK_TEMP_FOLDER = "/outbrain/spark-temp/" parser = argparse.ArgumentParser() parser.add_argument( '--submission', action='store_true', default=False ) args = parser.parse_args() evaluation = not args.submission conf = SparkConf().setMaster('local[*]').set('spark.executor.memory', '40g').set('spark.driver.memory', '200g').set( "spark.local.dir", SPARK_TEMP_FOLDER) sc = SparkContext(conf=conf) spark = SparkSession(sc) start_time = time.time() print('Loading data...') truncate_day_from_timestamp_udf = F.udf(lambda ts: int(ts / 1000 / 60 / 60 / 24), IntegerType()) extract_country_udf = F.udf(lambda geo: geo.strip()[:2] if geo is not None else '', StringType()) documents_meta_schema = StructType( [StructField("document_id_doc", IntegerType(), True), StructField("source_id", IntegerType(), True), StructField("publisher_id", IntegerType(), True), StructField("publish_time", TimestampType(), True)] ) documents_meta_df = spark.read.schema(documents_meta_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "documents_meta.csv") \ .withColumn('dummyDocumentsMeta', F.lit(1)).alias('documents_meta') documents_meta_df.count() print('Drop rows with empty "source_id"...') documents_meta_df = documents_meta_df.dropna(subset="source_id") documents_meta_df.count() source_publishers_df = documents_meta_df.select(["source_id", "publisher_id"]).dropDuplicates() source_publishers_df.count() print('Get list of source_ids without publisher_id...') rows_no_pub = source_publishers_df.filter("publisher_id is NULL") source_ids_without_publisher = [row['source_id'] for row in rows_no_pub.collect()] len(source_ids_without_publisher) print('Maximum value of publisher_id used so far...') max_pub = max(source_publishers_df.select(["publisher_id"]).dropna().collect())['publisher_id'] max_pub print('Rows filled with new publisher_ids') new_publishers = [(source, max_pub + 1 + nr) for nr, source in enumerate(source_ids_without_publisher)] new_publishers_df = spark.createDataFrame(new_publishers, ("source_id", "publisher_id")) new_publishers_df.take(10) # old and new publishers merged fixed_source_publishers_df = source_publishers_df.dropna().union(new_publishers_df) fixed_source_publishers_df.collect()[-30:] print('Update documents_meta with bew publishers...') documents_meta_df = documents_meta_df.drop('publisher_id').join(fixed_source_publishers_df, on='source_id') documents_meta_df.count() documents_categories_schema 
= StructType( [StructField("document_id_cat", IntegerType(), True), StructField("category_id", IntegerType(), True), StructField("confidence_level_cat", FloatType(), True)] ) documents_categories_df = spark.read.schema(documents_categories_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "documents_categories.csv") \ .alias('documents_categories') documents_categories_grouped_df = documents_categories_df.groupBy('document_id_cat') \ .agg(F.collect_list('category_id').alias('category_id_list'), F.collect_list('confidence_level_cat').alias('cat_confidence_level_list')) \ .withColumn('dummyDocumentsCategory', F.lit(1)) \ .alias('documents_categories_grouped') documents_topics_schema = StructType( [StructField("document_id_top", IntegerType(), True), StructField("topic_id", IntegerType(), True), StructField("confidence_level_top", FloatType(), True)] ) documents_topics_df = spark.read.schema(documents_topics_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "documents_topics.csv") \ .alias('documents_topics') documents_topics_grouped_df = documents_topics_df.groupBy('document_id_top') \ .agg(F.collect_list('topic_id').alias('topic_id_list'), F.collect_list('confidence_level_top').alias('top_confidence_level_list')) \ .withColumn('dummyDocumentsTopics', F.lit(1)) \ .alias('documents_topics_grouped') documents_entities_schema = StructType( [StructField("document_id_ent", IntegerType(), True), StructField("entity_id", StringType(), True), StructField("confidence_level_ent", FloatType(), True)] ) documents_entities_df = spark.read.schema(documents_entities_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "documents_entities.csv") \ .alias('documents_entities') documents_entities_grouped_df = documents_entities_df.groupBy('document_id_ent') \ .agg(F.collect_list('entity_id').alias('entity_id_list'), F.collect_list('confidence_level_ent').alias('ent_confidence_level_list')) \ .withColumn('dummyDocumentsEntities', F.lit(1)) \ .alias('documents_entities_grouped') documents_df = documents_meta_df.join( documents_categories_grouped_df, on=F.col("document_id_doc") == F.col("documents_categories_grouped.document_id_cat"), how='left') \ .join(documents_topics_grouped_df, on=F.col("document_id_doc") == F.col("documents_topics_grouped.document_id_top"), how='left') \ .join(documents_entities_grouped_df, on=F.col("document_id_doc") == F.col("documents_entities_grouped.document_id_ent"), how='left') \ .cache() documents_df.count() if evaluation: validation_set_df = spark.read.parquet(OUTPUT_BUCKET_FOLDER + "validation_set.parquet") \ .alias('validation_set') validation_set_df.select('uuid_event').distinct().createOrReplaceTempView('users_to_profile') validation_set_df.select('uuid_event', 'document_id_promo').distinct() \ .createOrReplaceTempView('validation_users_docs_to_ignore') else: events_schema = StructType( [StructField("display_id", IntegerType(), True), StructField("uuid_event", StringType(), True), StructField("document_id_event", IntegerType(), True), StructField("timestamp_event", IntegerType(), True), StructField("platform_event", IntegerType(), True), StructField("geo_location_event", StringType(), True)] ) events_df = spark.read.schema(events_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "events.csv") \ .withColumn('dummyEvents', F.lit(1)) \ .withColumn('day_event', 
truncate_day_from_timestamp_udf('timestamp_event')) \ .withColumn('event_country', extract_country_udf('geo_location_event')) \ .alias('events') # Drop rows with empty "geo_location" events_df = events_df.dropna(subset="geo_location_event") # Drop rows with empty "platform" events_df = events_df.dropna(subset="platform_event") events_df.createOrReplaceTempView('events') promoted_content_schema = StructType( [StructField("ad_id", IntegerType(), True), StructField("document_id_promo", IntegerType(), True), StructField("campaign_id", IntegerType(), True), StructField("advertiser_id", IntegerType(), True)] ) promoted_content_df = spark.read.schema(promoted_content_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "promoted_content.csv") \ .withColumn('dummyPromotedContent', F.lit(1)).alias('promoted_content') clicks_test_schema = StructType( [StructField("display_id", IntegerType(), True), StructField("ad_id", IntegerType(), True)] ) clicks_test_df = spark.read.schema(clicks_test_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "clicks_test.csv") \ .withColumn('dummyClicksTest', F.lit(1)).alias('clicks_test') test_set_df = clicks_test_df.join(promoted_content_df, on='ad_id', how='left') \ .join(events_df, on='display_id', how='left') test_set_df.select('uuid_event').distinct().createOrReplaceTempView('users_to_profile') test_set_df.select('uuid_event', 'document_id_promo', 'timestamp_event').distinct() \ .createOrReplaceTempView('test_users_docs_timestamp_to_ignore') page_views_schema = StructType( [StructField("uuid_pv", StringType(), True), StructField("document_id_pv", IntegerType(), True), StructField("timestamp_pv", IntegerType(), True), StructField("platform_pv", IntegerType(), True), StructField("geo_location_pv", StringType(), True), StructField("traffic_source_pv", IntegerType(), True)] ) page_views_df = spark.read.schema(page_views_schema) \ .options(header='true', inferschema='false', nullValue='\\N') \ .csv(DATA_BUCKET_FOLDER + "page_views.csv") \ .alias('page_views') page_views_df.createOrReplaceTempView('page_views') additional_filter = '' if evaluation: additional_filter = ''' AND NOT EXISTS (SELECT uuid_event FROM validation_users_docs_to_ignore WHERE uuid_event = p.uuid_pv AND document_id_promo = p.document_id_pv) ''' else: additional_filter = ''' AND NOT EXISTS (SELECT uuid_event FROM test_users_docs_timestamp_to_ignore WHERE uuid_event = p.uuid_pv AND document_id_promo = p.document_id_pv AND p.timestamp_pv >= timestamp_event) ''' page_views_train_df = spark.sql(''' SELECT * FROM page_views p WHERE EXISTS (SELECT uuid_event FROM users_to_profile WHERE uuid_event = p.uuid_pv) ''' + additional_filter).alias('views') \ .join(documents_df, on=F.col("document_id_pv") == F.col("document_id_doc"), how='left') \ .filter( 'dummyDocumentsEntities is not null OR dummyDocumentsTopics is not null OR dummyDocumentsCategory is not null') print('Processing document frequencies...') documents_total = documents_meta_df.count() documents_total categories_docs_counts = documents_categories_df.groupBy('category_id').count().rdd.collectAsMap() len(categories_docs_counts) df_filenames_suffix = '' if evaluation: df_filenames_suffix = '_eval' with open(OUTPUT_BUCKET_FOLDER + 'categories_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output: pickle.dump(categories_docs_counts, output) topics_docs_counts = documents_topics_df.groupBy('topic_id').count().rdd.collectAsMap() 
len(topics_docs_counts) with open(OUTPUT_BUCKET_FOLDER + 'topics_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output: pickle.dump(topics_docs_counts, output) entities_docs_counts = documents_entities_df.groupBy('entity_id').count().rdd.collectAsMap() len(entities_docs_counts) with open(OUTPUT_BUCKET_FOLDER + 'entities_docs_counts' + df_filenames_suffix + '.pickle', 'wb') as output: pickle.dump(entities_docs_counts, output) print('Processing user profiles...') int_null_to_minus_one_udf = F.udf(lambda x: x if x is not None else -1, IntegerType()) int_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(IntegerType())) float_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(FloatType())) str_list_null_to_empty_list_udf = F.udf(lambda x: x if x is not None else [], ArrayType(StringType())) page_views_by_user_df = page_views_train_df.select( 'uuid_pv', 'document_id_pv', int_null_to_minus_one_udf('timestamp_pv').alias('timestamp_pv'), int_list_null_to_empty_list_udf('category_id_list').alias('category_id_list'), float_list_null_to_empty_list_udf('cat_confidence_level_list').alias('cat_confidence_level_list'), int_list_null_to_empty_list_udf('topic_id_list').alias('topic_id_list'), float_list_null_to_empty_list_udf('top_confidence_level_list').alias('top_confidence_level_list'), str_list_null_to_empty_list_udf('entity_id_list').alias('entity_id_list'), float_list_null_to_empty_list_udf('ent_confidence_level_list').alias('ent_confidence_level_list')) \ .groupBy('uuid_pv') \ .agg(F.collect_list('document_id_pv').alias('document_id_pv_list'), F.collect_list('timestamp_pv').alias('timestamp_pv_list'), F.collect_list('category_id_list').alias('category_id_lists'), F.collect_list('cat_confidence_level_list').alias('cat_confidence_level_lists'), F.collect_list('topic_id_list').alias('topic_id_lists'), F.collect_list('top_confidence_level_list').alias('top_confidence_level_lists'), F.collect_list('entity_id_list').alias('entity_id_lists'), F.collect_list('ent_confidence_level_list').alias('ent_confidence_level_lists')) def get_user_aspects(docs_aspects, aspect_docs_counts): docs_aspects_merged_lists = defaultdict(list) for doc_aspects in docs_aspects: for key in doc_aspects.keys(): docs_aspects_merged_lists[key].append(doc_aspects[key]) docs_aspects_stats = {} for key in docs_aspects_merged_lists.keys(): aspect_list = docs_aspects_merged_lists[key] tf = len(aspect_list) idf = math.log(documents_total / float(aspect_docs_counts[key])) confid_mean = sum(aspect_list) / float(len(aspect_list)) docs_aspects_stats[key] = [tf * idf, confid_mean] return docs_aspects_stats def generate_user_profile(docs_aspects_list, docs_aspects_confidence_list, aspect_docs_counts): docs_aspects = [] for doc_aspects_list, doc_aspects_confidence_list in zip(docs_aspects_list, docs_aspects_confidence_list): doc_aspects = dict(zip(doc_aspects_list, doc_aspects_confidence_list)) docs_aspects.append(doc_aspects) user_aspects = get_user_aspects(docs_aspects, aspect_docs_counts) return user_aspects get_list_len_udf = F.udf(lambda docs_list: len(docs_list), IntegerType()) generate_categories_user_profile_map_udf = F.udf( lambda docs_aspects_list, docs_aspects_confidence_list: generate_user_profile(docs_aspects_list, docs_aspects_confidence_list, categories_docs_counts), MapType(IntegerType(), ArrayType(FloatType()), False)) generate_topics_user_profile_map_udf = F.udf( lambda docs_aspects_list, docs_aspects_confidence_list: 
generate_user_profile(docs_aspects_list, docs_aspects_confidence_list, topics_docs_counts), MapType(IntegerType(), ArrayType(FloatType()), False)) generate_entities_user_profile_map_udf = F.udf( lambda docs_aspects_list, docs_aspects_confidence_list: generate_user_profile(docs_aspects_list, docs_aspects_confidence_list, entities_docs_counts), MapType(StringType(), ArrayType(FloatType()), False)) users_profile_df = page_views_by_user_df \ .withColumn('views', get_list_len_udf('document_id_pv_list')) \ .withColumn('categories', generate_categories_user_profile_map_udf('category_id_lists', 'cat_confidence_level_lists')) \ .withColumn('topics', generate_topics_user_profile_map_udf('topic_id_lists', 'top_confidence_level_lists')) \ .withColumn('entities', generate_entities_user_profile_map_udf('entity_id_lists', 'ent_confidence_level_lists')) \ .select( F.col('uuid_pv').alias('uuid'), F.col('document_id_pv_list').alias('doc_ids'), 'views', 'categories', 'topics', 'entities') if evaluation: table_name = 'user_profiles_eval' else: table_name = 'user_profiles' users_profile_df.write.parquet(OUTPUT_BUCKET_FOLDER + table_name, mode='overwrite') finish_time = time.time() print("Elapsed min: ", (finish_time - start_time) / 60) spark.stop()
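The user profile weights produced by `get_user_aspects` above are a TF*IDF of each category/topic/entity across the documents a user viewed, paired with the mean confidence. A tiny worked example of the same arithmetic, detached from Spark; all numbers are made up.

```python
import math
from collections import defaultdict

documents_total = 1000                   # stand-in for documents_meta_df.count()
aspect_docs_counts = {7: 100, 42: 10}    # e.g. category_id -> number of documents

# Confidence of each aspect in the documents one user viewed.
docs_aspects = [{7: 0.9}, {7: 0.8, 42: 0.5}]

merged = defaultdict(list)
for doc in docs_aspects:
    for key, conf in doc.items():
        merged[key].append(conf)

stats = {}
for key, confs in merged.items():
    tf = len(confs)                                     # term frequency
    idf = math.log(documents_total / float(aspect_docs_counts[key]))
    stats[key] = [tf * idf, sum(confs) / len(confs)]    # [tf*idf, mean confidence]

print(stats)  # roughly {7: [4.6, 0.85], 42: [4.6, 0.5]}
```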
CUDA-Optimized/FastSpeech/waveglow
waveglow
denoiser
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import sys sys.path.append('tacotron2') import torch from common.layers import STFT class Denoiser(torch.nn.Module): """ Removes model bias from audio produced with waveglow """ def __init__(self, waveglow, cpu_run=False, filter_length=1024, n_overlap=4, win_length=1024, mode='zeros'): super(Denoiser, self).__init__() if cpu_run: self.stft = STFT(filter_length=filter_length, hop_length=int(filter_length/n_overlap), win_length=win_length) else: self.stft = STFT(filter_length=filter_length, hop_length=int(filter_length/n_overlap), win_length=win_length).cuda() if mode == 'zeros': mel_input = torch.zeros( (1, 80, 88), dtype=waveglow.upsample.weight.dtype, device=waveglow.upsample.weight.device) elif mode == 'normal': mel_input = torch.randn( (1, 80, 88), dtype=waveglow.upsample.weight.dtype, device=waveglow.upsample.weight.device) else: raise Exception("Mode {} if not supported".format(mode)) with torch.no_grad(): bias_audio = waveglow.infer(mel_input, sigma=0.0).float() bias_spec, _ = self.stft.transform(bias_audio) self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None]) def forward(self, audio, strength=0.1): audio_spec, audio_angles = self.stft.transform(audio.float()) audio_spec_denoised = audio_spec - self.bias_spec * strength audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0) audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles) return audio_denoised
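The core of `Denoiser.forward` above is spectral subtraction: the magnitude spectrogram of the audio minus a scaled bias spectrum, clamped at zero, then inverted with the original phase. Below is a self-contained sketch of that operation using plain `torch.stft` instead of the repository's `STFT` class; the bias values are placeholders.

```python
import torch

audio = torch.randn(1, 22528)
window = torch.hann_window(1024)
spec = torch.stft(audio, n_fft=1024, hop_length=256, win_length=1024,
                  window=window, return_complex=True)
magnitude, phase = spec.abs(), spec.angle()

bias_spec = 0.05 * torch.ones_like(magnitude[:, :, :1])  # stand-in for the model's bias spectrum
denoised_mag = torch.clamp(magnitude - 0.01 * bias_spec, min=0.0)

# Rebuild the waveform from the denoised magnitude and the original phase.
denoised = torch.istft(torch.polar(denoised_mag, phase), n_fft=1024,
                       hop_length=256, win_length=1024, window=window)
print(denoised.shape)
```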
TensorFlow2/Recommendation/SIM/scripts
scripts
download_amazon_books_2014
#!/bin/bash
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

AMAZON_BOOKS_2014_DESTINATION=/data/amazon_books_2014

mkdir -p $AMAZON_BOOKS_2014_DESTINATION

if [ ! -f $AMAZON_BOOKS_2014_DESTINATION/meta_Books.json ]; then
    echo "Amazon Books 2014 metadata not found. Proceeding to download it."
    wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/meta_Books.json.gz -P $AMAZON_BOOKS_2014_DESTINATION
    gunzip $AMAZON_BOOKS_2014_DESTINATION/meta_Books.json.gz
else
    echo "Amazon Books 2014 metadata is already downloaded."
fi

if [ ! -f $AMAZON_BOOKS_2014_DESTINATION/reviews_Books.json ]; then
    echo "Amazon Books 2014 reviews not found. Proceeding to download them."
    wget http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Books.json.gz -P $AMAZON_BOOKS_2014_DESTINATION
    gunzip $AMAZON_BOOKS_2014_DESTINATION/reviews_Books.json.gz
else
    echo "Amazon Books 2014 reviews are already downloaded."
fi
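A hedged Python equivalent of the wget-and-gunzip steps above, for environments without the shell tools; the URL and destination directory are taken from the script, and only the metadata file is shown.

```python
import gzip
import shutil
import urllib.request
from pathlib import Path

dest = Path("/data/amazon_books_2014")
dest.mkdir(parents=True, exist_ok=True)

url = "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/meta_Books.json.gz"
gz_path = dest / "meta_Books.json.gz"
json_path = dest / "meta_Books.json"

if not json_path.exists():
    urllib.request.urlretrieve(url, gz_path)          # download the compressed file
    with gzip.open(gz_path, "rb") as src, open(json_path, "wb") as dst:
        shutil.copyfileobj(src, dst)                   # equivalent of gunzip
```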
PyTorch/SpeechSynthesis/FastPitch/common
common
gpu_affinity
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import math import os import pathlib import re import pynvml pynvml.nvmlInit() def systemGetDriverVersion(): return pynvml.nvmlSystemGetDriverVersion() def deviceGetCount(): return pynvml.nvmlDeviceGetCount() class device: # assume nvml returns list of 64 bit ints _nvml_affinity_elements = math.ceil(os.cpu_count() / 64) def __init__(self, device_idx): super().__init__() self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx) def getName(self): return pynvml.nvmlDeviceGetName(self.handle) def getCpuAffinity(self): affinity_string = '' for j in pynvml.nvmlDeviceGetCpuAffinity( self.handle, device._nvml_affinity_elements ): # assume nvml returns list of 64 bit ints affinity_string = '{:064b}'.format(j) + affinity_string affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is in 0th element of list ret = [i for i, e in enumerate(affinity_list) if e != 0] return ret def set_socket_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity) def set_single_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity[:1]) def set_single_unique_affinity(gpu_id, nproc_per_node): devices = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in devices] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) affinities = [] assigned = [] for socket_affinity in socket_affinities: for core in socket_affinity: if core not in assigned: affinities.append([core]) assigned.append(core) break os.sched_setaffinity(0, affinities[gpu_id]) def set_socket_unique_affinity(gpu_id, nproc_per_node, mode): device_ids = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in device_ids] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) socket_affinities_to_device_ids = collections.defaultdict(list) for idx, socket_affinity in enumerate(socket_affinities): socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx) for socket_affinity, device_ids in socket_affinities_to_device_ids.items(): devices_per_group = len(device_ids) cores_per_device = len(socket_affinity) // devices_per_group for group_id, device_id in enumerate(device_ids): if device_id == gpu_id: if mode == 'interleaved': affinity = list(socket_affinity[group_id::devices_per_group]) elif mode == 'continuous': affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device]) else: raise RuntimeError('Unknown set_socket_unique_affinity mode') # 
reintroduce siblings affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict] os.sched_setaffinity(0, affinity) def get_thread_siblings_list(): path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list' thread_siblings_list = [] pattern = re.compile(r'(\d+)\D(\d+)') for fname in pathlib.Path(path[0]).glob(path[1:]): with open(fname) as f: content = f.read().strip() res = pattern.findall(content) if res: pair = tuple(map(int, res[0])) thread_siblings_list.append(pair) return thread_siblings_list def set_affinity(gpu_id, nproc_per_node, mode='socket'): if mode == 'socket': set_socket_affinity(gpu_id) elif mode == 'single': set_single_affinity(gpu_id) elif mode == 'single_unique': set_single_unique_affinity(gpu_id, nproc_per_node) elif mode == 'socket_unique_interleaved': set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved') elif mode == 'socket_unique_continuous': set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous') else: raise RuntimeError('Unknown affinity mode') affinity = os.sched_getaffinity(0) return affinity
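A hedged usage sketch for `set_affinity` above, as it would typically be called once per process in a `torch.distributed` launch; it assumes an NVIDIA driver plus `pynvml` are available and that the FastPitch repo root is on `PYTHONPATH`. `LOCAL_RANK` and `WORLD_SIZE` come from the launcher and fall back to single-process values here.

```python
import os

from common import gpu_affinity  # assumes the FastPitch repo root on PYTHONPATH

local_rank = int(os.getenv("LOCAL_RANK", 0))
nproc_per_node = int(os.getenv("WORLD_SIZE", 1))

# Pins this process to the CPU cores NVML reports as local to its GPU
# and returns the resulting affinity set.
affinity = gpu_affinity.set_affinity(local_rank, nproc_per_node, mode="socket")
print(f"rank {local_rank}: bound to {len(affinity)} cores")
```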
TensorFlow2/Segmentation/Contrib/UNet3P
UNet3P
train
""" Training script """ import numpy as np from datetime import datetime, timedelta import hydra from omegaconf import DictConfig import tensorflow as tf from tensorflow.keras import mixed_precision from tensorflow.keras.callbacks import ( EarlyStopping, ModelCheckpoint, TensorBoard, CSVLogger ) from data_generators import data_generator from data_preparation.verify_data import verify_data from utils.general_utils import create_directory, join_paths, set_gpus, \ suppress_warnings from models.model import prepare_model from losses.loss import DiceCoefficient from losses.unet_loss import unet3p_hybrid_loss from callbacks.timing_callback import TimingCallback def create_training_folders(cfg: DictConfig): """ Create directories to store Model CheckPoint and TensorBoard logs. """ create_directory( join_paths( cfg.WORK_DIR, cfg.CALLBACKS.MODEL_CHECKPOINT.PATH ) ) create_directory( join_paths( cfg.WORK_DIR, cfg.CALLBACKS.TENSORBOARD.PATH ) ) def train(cfg: DictConfig): """ Training method """ # suppress TensorFlow and DALI warnings suppress_warnings() print("Verifying data ...") verify_data(cfg) if cfg.MODEL.TYPE == "unet3plus_deepsup_cgm": raise ValueError( "UNet3+ with Deep Supervision and Classification Guided Module" "\nModel exist but training script is not supported for this variant" "please choose other variants from config file" ) if cfg.USE_MULTI_GPUS.VALUE: # change number of visible gpus for training set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS) # update batch size according to available gpus data_generator.update_batch_size(cfg) # create folders to store training checkpoints and logs create_training_folders(cfg) if cfg.OPTIMIZATION.AMP: print("Enabling Automatic Mixed Precision(AMP) training") policy = mixed_precision.Policy('mixed_float16') mixed_precision.set_global_policy(policy) if cfg.OPTIMIZATION.XLA: print("Enabling Accelerated Linear Algebra(XLA) training") tf.config.optimizer.set_jit(True) # create model strategy = None if cfg.USE_MULTI_GPUS.VALUE: # multi gpu training using tensorflow mirrored strategy strategy = tf.distribute.MirroredStrategy( cross_device_ops=tf.distribute.HierarchicalCopyAllReduce() ) print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync)) with strategy.scope(): optimizer = tf.keras.optimizers.Adam( learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE ) # optimizer if cfg.OPTIMIZATION.AMP: optimizer = mixed_precision.LossScaleOptimizer( optimizer, dynamic=True ) dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES) dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef) model = prepare_model(cfg, training=True) else: optimizer = tf.keras.optimizers.Adam( learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE ) # optimizer if cfg.OPTIMIZATION.AMP: optimizer = mixed_precision.LossScaleOptimizer( optimizer, dynamic=True ) dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES) dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef) model = prepare_model(cfg, training=True) model.compile( optimizer=optimizer, loss=unet3p_hybrid_loss, metrics=[dice_coef], ) model.summary() # data generators train_generator = data_generator.get_data_generator(cfg, "TRAIN", strategy) val_generator = data_generator.get_data_generator(cfg, "VAL", strategy) # verify generator # for i, (batch_images, batch_mask) in enumerate(val_generator): # print(len(batch_images)) # if i >= 3: break # the tensorboard log directory will be a unique subdirectory # based on the start time for the run 
tb_log_dir = join_paths( cfg.WORK_DIR, cfg.CALLBACKS.TENSORBOARD.PATH, "{}".format(datetime.now().strftime("%Y.%m.%d.%H.%M.%S")) ) print("TensorBoard directory\n" + tb_log_dir) checkpoint_path = join_paths( cfg.WORK_DIR, cfg.CALLBACKS.MODEL_CHECKPOINT.PATH, f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5" ) print("Weights path\n" + checkpoint_path) csv_log_path = join_paths( cfg.WORK_DIR, cfg.CALLBACKS.CSV_LOGGER.PATH, f"training_logs_{cfg.MODEL.TYPE}.csv" ) print("Logs path\n" + csv_log_path) # evaluation metric evaluation_metric = "val_dice_coef" if len(model.outputs) > 1: evaluation_metric = f"val_{model.output_names[0]}_dice_coef" # Timing, TensorBoard, EarlyStopping, ModelCheckpoint, CSVLogger callbacks timing_callback = TimingCallback() callbacks = [ TensorBoard(log_dir=tb_log_dir, write_graph=False, profile_batch=0), EarlyStopping( patience=cfg.CALLBACKS.EARLY_STOPPING.PATIENCE, verbose=cfg.VERBOSE ), ModelCheckpoint( checkpoint_path, verbose=cfg.VERBOSE, save_weights_only=cfg.CALLBACKS.MODEL_CHECKPOINT.SAVE_WEIGHTS_ONLY, save_best_only=cfg.CALLBACKS.MODEL_CHECKPOINT.SAVE_BEST_ONLY, monitor=evaluation_metric, mode="max" ), CSVLogger( csv_log_path, append=cfg.CALLBACKS.CSV_LOGGER.APPEND_LOGS ), timing_callback ] training_steps = data_generator.get_iterations(cfg, mode="TRAIN") validation_steps = data_generator.get_iterations(cfg, mode="VAL") # start training model.fit( x=train_generator, steps_per_epoch=training_steps, validation_data=val_generator, validation_steps=validation_steps, epochs=cfg.HYPER_PARAMETERS.EPOCHS, callbacks=callbacks, workers=cfg.DATALOADER_WORKERS, ) training_time = timing_callback.train_end_time - timing_callback.train_start_time training_time = timedelta(seconds=training_time) print(f"Total training time {training_time}") mean_time = np.mean(timing_callback.batch_time) throughput = data_generator.get_batch_size(cfg) / mean_time print(f"Training latency: {round(mean_time * 1e3, 2)} msec") print(f"Training throughput/FPS: {round(throughput, 2)} samples/sec") @hydra.main(version_base=None, config_path="configs", config_name="config") def main(cfg: DictConfig): """ Read config file and pass to train method for training """ train(cfg) if __name__ == "__main__": main()
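An isolated sketch of the AMP setup used in `train()` above: a global `mixed_float16` policy plus a dynamic `LossScaleOptimizer` wrapped around Adam, with the output head kept in float32. The toy model is a stand-in for `prepare_model`.

```python
import tensorflow as tf
from tensorflow.keras import mixed_precision

# Global policy: compute in float16, keep variables in float32.
mixed_precision.set_global_policy(mixed_precision.Policy("mixed_float16"))

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
# Dynamic loss scaling guards the float16 gradients against underflow.
optimizer = mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(1, dtype="float32"),  # keep the output head in float32
])
model.compile(optimizer=optimizer, loss="mse")
print(optimizer.dynamic, model.dtype_policy)
```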
TensorFlow/Detection/SSD/models/research/object_detection/builders
builders
post_processing_builder
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder function for post processing operations.""" import functools import tensorflow as tf from object_detection.core import post_processing from object_detection.protos import post_processing_pb2 def build(post_processing_config): """Builds callables for post-processing operations. Builds callables for non-max suppression and score conversion based on the configuration. Non-max suppression callable takes `boxes`, `scores`, and optionally `clip_window`, `parallel_iterations` `masks, and `scope` as inputs. It returns `nms_boxes`, `nms_scores`, `nms_classes` `nms_masks` and `num_detections`. See post_processing.batch_multiclass_non_max_suppression for the type and shape of these tensors. Score converter callable should be called with `input` tensor. The callable returns the output from one of 3 tf operations based on the configuration - tf.identity, tf.sigmoid or tf.nn.softmax. See tensorflow documentation for argument and return value descriptions. Args: post_processing_config: post_processing.proto object containing the parameters for the post-processing operations. Returns: non_max_suppressor_fn: Callable for non-max suppression. score_converter_fn: Callable for score conversion. Raises: ValueError: if the post_processing_config is of incorrect type. """ if not isinstance(post_processing_config, post_processing_pb2.PostProcessing): raise ValueError('post_processing_config not of type ' 'post_processing_pb2.Postprocessing.') non_max_suppressor_fn = _build_non_max_suppressor( post_processing_config.batch_non_max_suppression) score_converter_fn = _build_score_converter( post_processing_config.score_converter, post_processing_config.logit_scale) return non_max_suppressor_fn, score_converter_fn def _build_non_max_suppressor(nms_config): """Builds non-max suppresson based on the nms config. Args: nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression proto. Returns: non_max_suppressor_fn: Callable non-max suppressor. Raises: ValueError: On incorrect iou_threshold or on incompatible values of max_total_detections and max_detections_per_class. 
""" if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0: raise ValueError('iou_threshold not in [0, 1.0].') if nms_config.max_detections_per_class > nms_config.max_total_detections: raise ValueError('max_detections_per_class should be no greater than ' 'max_total_detections.') non_max_suppressor_fn = functools.partial( post_processing.batch_multiclass_non_max_suppression, score_thresh=nms_config.score_threshold, iou_thresh=nms_config.iou_threshold, max_size_per_class=nms_config.max_detections_per_class, max_total_size=nms_config.max_total_detections, use_static_shapes=nms_config.use_static_shapes) return non_max_suppressor_fn def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale): """Create a function to scale logits then apply a Tensorflow function.""" def score_converter_fn(logits): scaled_logits = tf.divide(logits, logit_scale, name='scale_logits') return tf_score_converter_fn(scaled_logits, name='convert_scores') score_converter_fn.__name__ = '%s_with_logit_scale' % ( tf_score_converter_fn.__name__) return score_converter_fn def _build_score_converter(score_converter_config, logit_scale): """Builds score converter based on the config. Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on the config. Args: score_converter_config: post_processing_pb2.PostProcessing.score_converter. logit_scale: temperature to use for SOFTMAX score_converter. Returns: Callable score converter op. Raises: ValueError: On unknown score converter. """ if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY: return _score_converter_fn_with_logit_scale(tf.identity, logit_scale) if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID: return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale) if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX: return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale) raise ValueError('Unknown score converter.')
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts
scripts
denoiser_to_json
#!/usr/bin/env python3 ## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import json import torch import sys import os from scipy.signal import get_window import librosa.util as librosa_util WAVEGLOW_CONFIG = { "n_mel_channels": 80, "n_flows": 12, "n_group": 8, "n_early_every": 4, "n_early_size": 2, "WN_config": { "n_layers": 8, "kernel_size": 3, "n_channels": 256 } } def gen_win_sq( denoiser): window = denoiser.stft.window win_length = denoiser.stft.win_length n_fft = denoiser.stft.filter_length # Compute the squared window at the desired length win_sq = get_window(window, win_length, fftbins=True) win_sq = librosa_util.normalize(win_sq, norm=None)**2 win_sq = librosa_util.pad_center(win_sq, n_fft) return win_sq if len(sys.argv) < 4 or len(sys.argv) > 5: print("USAGE:") print( "\t%s <tacotron2 directory> <waveglow checkpoint> <json output> [strength, default=0.1]" % sys.argv[0]) sys.exit(1) json_path = sys.argv[3] sys.path.append(sys.argv[1]) # must be imported after path is modified from import_utils import load_waveglow from waveglow.denoiser import Denoiser strength = 0.1 if len(sys.argv) == 5: strength = float(sys.argv[4]) print("Building denoiser") waveglow = load_waveglow(sys.argv[2], WAVEGLOW_CONFIG) denoiser = Denoiser(waveglow).cuda() statedict = {} statedict["denoiser.stft.forward_basis"] = denoiser.stft.forward_basis.cpu( ).numpy().tolist() statedict["denoiser.stft.inverse_basis"] = denoiser.stft.inverse_basis.cpu( ).numpy().tolist() statedict["denoiser.stft.win_sq"] = gen_win_sq(denoiser).tolist() statedict["denoiser.bias_spec"] = ( denoiser.bias_spec*strength).cpu().numpy().tolist() with open(json_path, "w") as fout: json.dump(statedict, fout, indent=2) print("Wrote to '%s'" % json_path)
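A hedged sketch of reading the JSON produced above back into tensors; the key names match the `statedict` keys the script writes, while the file path is a placeholder for the `<json output>` argument.

```python
import json

import torch

with open("denoiser.json") as fin:  # placeholder path
    state = json.load(fin)

forward_basis = torch.tensor(state["denoiser.stft.forward_basis"])
inverse_basis = torch.tensor(state["denoiser.stft.inverse_basis"])
win_sq = torch.tensor(state["denoiser.stft.win_sq"])
bias_spec = torch.tensor(state["denoiser.bias_spec"])
print(forward_basis.shape, bias_spec.shape)
```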
TensorFlow/Segmentation/UNet_Industrial/utils/hooks
hooks
profiler_hook
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== import os import json import time import operator import numpy as np import tensorflow as tf import dllogger as Logger __all__ = ["ProfilerHook"] class ProfilerHook(tf.train.SessionRunHook): def __init__(self, global_batch_size, sample_dir, log_every=10, warmup_steps=20, is_training=True): self._warmup_steps = warmup_steps self._global_batch_size = global_batch_size self._current_step = 0 self._log_every = log_every self._t0 = None self._start_training_time = None self._is_training = is_training self._sample_dir = sample_dir self._processing_speed_arr = list() @staticmethod def moving_average(a, n=4): if len(a) < n: return [np.mean(a)] ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n def after_create_session(self, session, coord): params_count = tf.get_default_graph().get_tensor_by_name("trainable_parameters_count_ref:0") _params_count = session.run(params_count) Logger._stage = "train" if self._is_training else "eval" Logger.log( step=('PARAMETER'), data={"# Total Trainable Parameters": int(_params_count)}, verbosity=Logger.Verbosity.DEFAULT ) Logger.metadata( metric="{prefix}.avg_ips".format(prefix=Logger._stage), metadata={"unit": "imgs/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()} ) for ths in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]: Logger.metadata( metric="{prefix}.IoU_THS_{ths}".format(prefix=Logger._stage, ths=ths), metadata={"format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()} ) if self._is_training: Logger.metadata( metric="{prefix}.learning_rate".format(prefix=Logger._stage), metadata={"format": ":.3e", "GOAL": "NONE", "STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.weight_decay".format(prefix=Logger._stage), metadata={"format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.reconstruction_loss".format(prefix=Logger._stage), metadata={"format": ":.3f", "GOAL": "MINIMIZE", "STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.total_loss".format(prefix=Logger._stage), metadata={"format": ":.3f", "GOAL": "MINIMIZE", "STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.true_positives".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.true_negatives".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.false_positives".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.false_negatives".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) Logger.metadata( 
metric="{prefix}.true_positive_rate".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) Logger.metadata( metric="{prefix}.true_negative_rate".format(prefix=Logger._stage), metadata={"STAGE": Logger._stage.upper()} ) self._start_training_time = time.time() def before_run(self, run_context): self._current_step += 1 request_fetches = dict() if self._current_step % self._log_every == 0: additional_fetches = { 'total_loss': tf.get_default_graph().get_tensor_by_name("losses/total_loss_ref:0"), 'iou_scores': dict(), 'confusion_matrix': dict() } if self._is_training: additional_fetches["weight_decay"] = tf.get_default_graph().get_tensor_by_name("losses/l2_loss_ref:0") additional_fetches["reconstruction_loss"] = tf.get_default_graph( ).get_tensor_by_name("losses/reconstruction_loss_ref:0") additional_fetches["learning_rate"] = tf.get_default_graph( ).get_tensor_by_name("optimizers/learning_rate_ref:0") # ==================== Samples ==================== # if self._sample_dir is not None and self._is_training: additional_fetches["samples"] = {} additional_fetches["samples"]["input_image"] = tf.get_default_graph( ).get_tensor_by_name("input_image_jpeg_ref:0") additional_fetches["samples"]["mask"] = tf.get_default_graph().get_tensor_by_name("mask_sample_ref:0") for threshold in [None, 0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]: additional_fetches["samples"][str(threshold)] = tf.get_default_graph().get_tensor_by_name( "output_sample_ths_%s_ref:0" % threshold ) # ==================== Evaluation Metrics ==================== # for threshold in [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]: if threshold is not None: additional_fetches["iou_scores"][str(threshold)] = tf.get_default_graph().get_tensor_by_name( "IoU_Metrics/iou_score_ths_%s_ref:0" % threshold ) additional_fetches["confusion_matrix"]["tp"] = tf.get_default_graph( ).get_tensor_by_name("Confusion_Matrix/true_positives_ref:0") additional_fetches["confusion_matrix"]["tn"] = tf.get_default_graph( ).get_tensor_by_name("Confusion_Matrix/true_negatives_ref:0") additional_fetches["confusion_matrix"]["fp"] = tf.get_default_graph( ).get_tensor_by_name("Confusion_Matrix/false_positives_ref:0") additional_fetches["confusion_matrix"]["fn"] = tf.get_default_graph( ).get_tensor_by_name("Confusion_Matrix/false_negatives_ref:0") # Update `request_fetches` dict request_fetches.update(additional_fetches) print("\n######### START: %d ##############" % self._current_step) self._t0 = time.time() return tf.train.SessionRunArgs(fetches=request_fetches) def after_run(self, run_context, run_values): batch_time = time.time() - self._t0 imgs_per_sec = int(self._global_batch_size / batch_time) is_log_step = self._current_step % self._log_every == 0 if is_log_step: if self._current_step > self._warmup_steps: imgs_per_sec = float(ProfilerHook.moving_average(self._processing_speed_arr, n=30)[-1]) Logger.log( step=(self._current_step,), data={"{prefix}.avg_ips".format(prefix=Logger._stage): float(imgs_per_sec)}, verbosity=Logger.Verbosity.DEFAULT ) if self._is_training: Logger.log( step=(self._current_step,), data={"{prefix}.weight_decay".format(prefix=Logger._stage): float(run_values.results["weight_decay"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.reconstruction_loss".format(prefix=Logger._stage): float(run_values.results["reconstruction_loss"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.total_loss".format(prefix=Logger._stage): 
float(run_values.results["total_loss"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.learning_rate".format(prefix=Logger._stage): float(run_values.results["learning_rate"])}, verbosity=Logger.Verbosity.DEFAULT ) for key, val in sorted(run_values.results["iou_scores"].items(), key=operator.itemgetter(0)): Logger.log( step=(self._current_step,), data={"{prefix}.IoU_THS_{ths}".format(prefix=Logger._stage, ths=key): float(val)}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.true_positives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["tp"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.true_negatives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["tn"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.false_positives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["fp"])}, verbosity=Logger.Verbosity.DEFAULT ) Logger.log( step=(self._current_step,), data={"{prefix}.false_negatives".format(prefix=Logger._stage): str(run_values.results["confusion_matrix"]["fn"])}, verbosity=Logger.Verbosity.DEFAULT ) if self._sample_dir is not None and self._is_training: for key in sorted(run_values.results["samples"].keys(), key=operator.itemgetter(0)): with open( os.path.join(self._sample_dir, "sample_step_%04d_ths_%s.jpeg" % (self._current_step, key)), 'wb' ) as fd: fd.write(run_values.results["samples"][key]) with open( os.path.join(self._sample_dir, "sample_step_%04d_mask.jpeg" % self._current_step), 'wb' ) as fd: fd.write(run_values.results["samples"]["mask"]) print("######### STOP: %d ##############" % self._current_step) elif self._current_step > self._warmup_steps: # Do not store speed for log step due to additional fetches self._processing_speed_arr.append(imgs_per_sec) def end(self, session): try: avg_processing_speed = float(ProfilerHook.moving_average(self._processing_speed_arr, n=100)[-1]) except: avg_processing_speed = float(np.mean(self._processing_speed_arr)) total_processing_time = time.time() - self._start_training_time total_processing_hours, rem = divmod(total_processing_time, 3600) print("\n============== Final Summary ==============") Logger.log( step=(), data={"{prefix}.avg_ips".format(prefix=Logger._stage): avg_processing_speed}, verbosity=Logger.Verbosity.DEFAULT ) perf_dict = {'throughput': str(avg_processing_speed), 'processing_time': str(total_processing_time)} perf_filename = "performances_%s.json" % ("train" if self._is_training else "eval") with open(os.path.join(self._sample_dir, "..", perf_filename), 'w') as f: json.dump(perf_dict, f)
TensorFlow/Detection/SSD/configs
configs
ssd320_bench
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # SSD with Resnet 50 v1 FPN feature extractor, shared box predictor and focal # loss (a.k.a Retinanet). # See Lin et al, https://arxiv.org/abs/1708.02002 # Trained on COCO, initialized from Imagenet classification checkpoint model { ssd { inplace_batchnorm_update: true freeze_batchnorm: true num_classes: 90 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true use_matmul_gather: true } } similarity_calculator { iou_similarity { } } encode_background_as_zeros: true anchor_generator { multiscale_anchor_generator { min_level: 3 max_level: 7 anchor_scale: 4.0 aspect_ratios: [1.0, 2.0, 0.5] scales_per_octave: 2 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { weight_shared_convolutional_box_predictor { depth: 256 class_prediction_bias_init: -4.6 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.0004 } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { scale: true, decay: 0.997, epsilon: 0.001, } } num_layers_before_predictor: 4 kernel_size: 3 } } feature_extractor { type: 'ssd_resnet50_v1_fpn' fpn { min_level: 3 max_level: 7 } min_depth: 16 depth_multiplier: 1.0 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.0004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { scale: true, decay: 0.997, epsilon: 0.001, } } override_base_feature_extractor_hyperparams: true } loss { classification_loss { weighted_sigmoid_focal { alpha: 0.25 gamma: 2.0 } } localization_loss { weighted_smooth_l1 { } } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true normalize_loc_loss_by_codesize: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { fine_tune_checkpoint: "/checkpoints/resnet_v1_50/model.ckpt" fine_tune_checkpoint_type: "classification" batch_size: 32 sync_replicas: true startup_delay_steps: 0 replicas_to_aggregate: 8 num_steps: 1250 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { random_crop_image { min_object_covered: 0.0 min_aspect_ratio: 0.75 max_aspect_ratio: 3.0 min_area: 0.75 max_area: 1.0 overlap_thresh: 0.0 } } optimizer { momentum_optimizer: { learning_rate: { cosine_decay_learning_rate { learning_rate_base: .02000000000000000000 total_steps: 1250 warmup_learning_rate: .00866640000000000000 warmup_steps: 400 } } momentum_optimizer_value: 0.9 } use_moving_average: false } max_number_of_boxes: 100 unpad_groundtruth_tensors: false } train_input_reader: { tf_record_input_reader { input_path: 
"/data/coco2017_tfrecords/*train*" } label_map_path: "object_detection/data/mscoco_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" use_moving_averages: false num_examples: 8000 } eval_input_reader: { tf_record_input_reader { input_path: "/data/coco2017_tfrecords/*val*" } label_map_path: "object_detection/data/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
TensorFlow2/Segmentation/Contrib/UNet3P/models
models
unet3plus_deep_supervision
""" UNet3+ with Deep Supervision """ import tensorflow as tf import tensorflow.keras as k from .unet3plus_utils import conv_block def unet3plus_deepsup(encoder_layer, output_channels, filters, training=False): """ UNet_3Plus with Deep Supervision """ """ Encoder """ e1 = encoder_layer[0] e2 = encoder_layer[1] e3 = encoder_layer[2] e4 = encoder_layer[3] e5 = encoder_layer[4] """ Decoder """ cat_channels = filters[0] cat_blocks = len(filters) upsample_channels = cat_blocks * cat_channels """ d4 """ e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64 e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64 e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128 e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64 e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256 e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64 e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64 e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256 e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64 d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4]) d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320 """ d3 """ e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64 e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64 e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256 e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64 e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64 e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320 e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320 e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3]) d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320 """ d2 """ e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64 e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64 e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64 d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320 d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320 d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320 e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2]) d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320 """ d1 """ e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64 d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320 d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64 d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320 d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 
40*40*320 --> 320*320*320 d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320 e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ]) d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320 # last layer does not have batch norm and relu d1 = conv_block(d1, output_channels, n=1, is_bn=False, is_relu=False) if output_channels == 1: d1 = k.layers.Activation('sigmoid', dtype='float32')(d1) else: # d1 = k.activations.softmax(d1) d1 = k.layers.Activation('softmax', dtype='float32')(d1) """ Deep Supervision Part""" if training: d2 = conv_block(d2, output_channels, n=1, is_bn=False, is_relu=False) d3 = conv_block(d3, output_channels, n=1, is_bn=False, is_relu=False) d4 = conv_block(d4, output_channels, n=1, is_bn=False, is_relu=False) e5 = conv_block(e5, output_channels, n=1, is_bn=False, is_relu=False) # d1 = no need for up sampling d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) d4 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) e5 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) if output_channels == 1: d2 = k.layers.Activation('sigmoid', dtype='float32')(d2) d3 = k.layers.Activation('sigmoid', dtype='float32')(d3) d4 = k.layers.Activation('sigmoid', dtype='float32')(d4) e5 = k.layers.Activation('sigmoid', dtype='float32')(e5) else: d2 = k.layers.Activation('softmax', dtype='float32')(d2) d3 = k.layers.Activation('softmax', dtype='float32')(d3) d4 = k.layers.Activation('softmax', dtype='float32')(d4) e5 = k.layers.Activation('softmax', dtype='float32')(e5) if training: return [d1, d2, d3, d4, e5], 'UNet3Plus_DeepSup' else: return [d1, ], 'UNet3Plus_DeepSup'
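The decoder above builds UNet3+ full-scale skip connections: shallower encoder maps are max-pooled down and deeper maps are bilinearly upsampled so that every branch matches the target resolution, each reduced to cat_channels before concatenation. A minimal standalone Keras sketch of that resampling pattern for a single d3-style node, with the repo's conv_block replaced by a plain Conv2D+BN+ReLU stand-in (shapes and names are illustrative):

import tensorflow.keras as k

def conv_bn_relu(x, filters):
    # Illustrative stand-in for the repository's conv_block helper.
    x = k.layers.Conv2D(filters, 3, padding="same")(x)
    x = k.layers.BatchNormalization()(x)
    return k.layers.Activation("relu")(x)

# Dummy encoder maps at full, 1/2 and 1/4 resolution.
e1 = k.Input((320, 320, 64))
e2 = k.Input((160, 160, 128))
e3 = k.Input((80, 80, 256))

cat_channels = 64
# Bring every branch to 80x80 with cat_channels channels, then fuse.
b1 = conv_bn_relu(k.layers.MaxPool2D(pool_size=4)(e1), cat_channels)
b2 = conv_bn_relu(k.layers.MaxPool2D(pool_size=2)(e2), cat_channels)
b3 = conv_bn_relu(e3, cat_channels)
d3 = conv_bn_relu(k.layers.concatenate([b1, b2, b3]), 3 * cat_channels)

model = k.Model([e1, e2, e3], d3)
model.summary()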
TensorFlow/Translation/GNMT/variable_mgr
variable_mgr
allreduce
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for allreduce.""" from __future__ import print_function import collections as pycoll import re from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.contrib.all_reduce.python import all_reduce from tensorflow.python.ops import collective_ops AllReduceSpecTuple = pycoll.namedtuple('AllReduceSpecTuple', 'alg shards limit') def parse_general_int(s): """Parse integer with power-of-2 suffix eg. 32k.""" mo = re.match(r'(\d+)([KkMGT]?)$', s) if mo: i, suffix = mo.group(1, 2) v = int(i) if suffix: if suffix == 'K' or suffix == 'k': v *= 1024 elif suffix == 'M': v *= (1024 * 1024) elif suffix == 'G': v *= (1024 * 1024 * 1024) elif suffix == 'T': v *= (1024 * 1024 * 1024 * 1024) else: raise ValueError('invalid integer string %s' % s) return v else: v = int(s) return v def parse_all_reduce_spec(all_reduce_spec): """Parse all_reduce_spec. Args: all_reduce_spec: a string specifying a combination of all-reduce algorithms to apply for gradient reduction. Returns: a list of AllReduceSpecTuple. Raises: ValueError: all_reduce_spec is not well-formed. An all_reduce_spec has BNF form: int ::= positive whole number g_int ::= int[KkMGT]? alg_spec ::= alg | alg#int range_spec ::= alg_spec | alg_spec/alg_spec spec ::= range_spec | range_spec:g_int:range_spec Not all syntactically correct specifications are supported. Examples of supported all_reduce_spec strings, with semantics explained: 'collective' == apply tf.collective_reduce operator to all tensors. 'collective#2' == apply tf.collective_reduce operator to all tensors, requesting up to 2 simultaneous transfers at each node, if feasible, by subdividing tensor by an additional factor of 2. 'xring' == apply ring all-reduce to all tensors 'xring#2' == apply ring all-reduce to all tensors, using two simultaneous transfer rings, each operating on 1/2 of each tensor. 'nccl' == apply NCCL all-reduce to all tensors (only works within a single worker process where all devices are GPUs) 'nccl/xring' == apply NCCL all-reduce to all tensors within each worker to produce at least one full-reduced (locally) value, then apply ring all-reduce to one such value from each worker, then apply NCCL broadcast to propagate those globally reduced values back to every device within each worker. 'pscpu' == Shuffle reduce using worker CPUs as the gather devices: each distributed tensor is reduced by copying all instances to one of the worker CPUs, computing the reduction there, then copying back to each participating device. Tensor reductions are assigned to specific CPUs round-robin. 'psgpu#4' == Arrange all GPUs across all workers into groups of 4. Each distributed tensor is shuffle reduced against one such group of 4 GPUs, selected round-robin. That is, each tensor is split across 4 shards for the reduction. 
'pscpu:2k:pscpu#2:64k:xring' == Apply single-shard pscpu to tensors of size <= 2048 elements, apply 2-shard pscpu to tensors up to size 64k elements, apply xring to larger tensors. 'pscpu/pscpu#2' == Use shuffle gather to locally reduce each tensor on the worker's CPU, then use 2-shard shuffle to reduce those locally reduced tensors across workers (on the worker CPUs), then scatter the globally reduced values locally from each worker CPU. """ range_parts = all_reduce_spec.split(':') + ['-1'] if len(range_parts) % 2: raise ValueError('all_reduce_spec not well formed: %s' % all_reduce_spec) limit = 0 spec = [] alg = None shards = 1 for i, range_part in enumerate(range_parts): if i % 2 == 1: try: limit = parse_general_int(range_part) spec.append(AllReduceSpecTuple(alg=alg, shards=shards, limit=limit)) except ValueError: raise ValueError('all_reduce_spec (%s) contains non-integer range %s' % (all_reduce_spec, range_part)) else: alg = range_part alg_parts = range_part.split('#') alg = alg_parts[0] if len(alg_parts) > 1: try: shards = int(alg_parts[1]) except ValueError: raise ValueError('all_reduce_spec (%s) contains non-integer ' 'shards %s' % all_reduce_spec, alg_parts[1]) else: shards = 1 if alg not in [ 'nccl', 'nccl/xring', 'nccl/rechd', 'nccl/pscpu', 'xring', 'pscpu', 'psgpu', 'pscpu/pscpu', 'collective' ]: raise ValueError('all_reduce_spec (%s) contains invalid alg %s' % (all_reduce_spec, alg)) return spec def build_all_reduce_device_prefixes(job_name, num_tasks): """Build list of device prefix names for all_reduce. Args: job_name: 'worker', 'ps' or 'localhost'. num_tasks: number of jobs across which device names should be generated. Returns: A list of device name prefix strings. Each element spells out the full host name without adding the device. e.g. '/job:worker/task:0' """ if job_name != 'localhost': return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)] else: assert num_tasks == 1 return ['/job:%s' % job_name] def group_device_names(devices, group_size): """Group device names into groups of group_size. Args: devices: list of strings naming devices. group_size: int >= 1 Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size = 0 then each device will appear exactly once. Raises: ValueError: group_size > len(devices) """ num_devices = len(devices) if group_size > num_devices: raise ValueError('only %d devices, but group_size=%d' % (num_devices, group_size)) num_groups = ( num_devices // group_size + (1 if (num_devices % group_size != 0) else 0)) groups = [[] for i in range(num_groups)] for i in range(0, num_groups * group_size): groups[i % num_groups].append(devices[i % num_devices]) return groups def split_grads_by_size(threshold_size, device_grads): """Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices. The inner list is over individual gradients. Returns: small_grads: Subset of device_grads where shape is <= theshold_size elements. large_grads: Subset of device_grads where shape is > threshold_size elements. 
""" small_grads = [] large_grads = [] for dl in device_grads: small_dl = [] large_dl = [] for (g, v) in dl: tensor_size = g.get_shape().num_elements() if tensor_size <= threshold_size: small_dl.append([g, v]) else: large_dl.append([g, v]) if small_dl: small_grads.append(small_dl) if large_dl: large_grads.append(large_dl) return small_grads, large_grads _instance_key = 1 def new_collective_instance_key(): """Returns a new instance key for use in defining a collective op.""" global _instance_key v = _instance_key _instance_key += 1 return v _group_key = 1 _group_key_table = dict() def collective_group_key(devices): """Returns a group key for the set of devices. Args: devices: list of strings naming devices in a collective group. Returns: int key uniquely identifying the set of device names. """ global _group_key global _group_key_table parsed = [tf.DeviceSpec.from_string(d) for d in devices] names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed]) concat = ','.join(names) if concat not in _group_key_table.keys(): new_key = _group_key _group_key += 1 _group_key_table[concat] = new_key rv = _group_key_table[concat] return rv def build_collective_reduce(input_tensors, num_workers, num_shards, red_op='Add', un_op='Id'): """Build a subgraph that does one full all-reduce, using the collective Op. Args: input_tensors: tensors within a single worker graph that are to be reduced together; must be one per device. num_workers: total number of workers with identical independent graphs that will be doing this same reduction. The reduction will actually include the corresponding tensors at all these workers. num_shards: number of shards into which to divide each per-tick chunk, normally 1 but could be higher on multi-data-path architectures. red_op: string naming the reduction op un_op: string naming the unary final op Returns: An array of final tensors, one per device, computed by the full reduction. Raises: ValueError: There must be at least two tensors over all the workers. 
""" group_size = len(input_tensors) * num_workers if group_size < 2: raise ValueError('num_workers * len(input_tensors) must be 2 or greater') devices = [t.device for t in input_tensors] num_devices = len(devices) group_key = collective_group_key(devices) instance_key = new_collective_instance_key() out_tensors = [] if num_shards == 1: subdiv_offsets = [0] elif num_shards == 2: if num_devices > 1: subdiv_offsets = [0, -(num_devices // 2)] else: subdiv_offsets = [0] else: raise ValueError('Unsupported num_shards %d' % num_shards) for d in range(num_devices): with tf.device(devices[d]): reduce_op = collective_ops.all_reduce(input_tensors[d], group_size, group_key, instance_key, red_op, un_op, subdiv_offsets) out_tensors.append(reduce_op) return out_tensors def broadcast_send(t, shape, dtype, group_size, group_key, instance_key): return collective_ops.broadcast_send(t, shape, dtype, group_size, group_key, instance_key) def broadcast_recv(shape, dtype, group_size, group_key, instance_key): return collective_ops.broadcast_recv(shape, dtype, group_size, group_key, instance_key) def sum_grad_and_var_all_reduce(single_session, grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1): """Apply all-reduce algorithm over specified gradient tensors.""" scaled_grads = [g for g, _ in grad_and_vars] if alg == 'collective': assert not single_session summed_grads = build_collective_reduce( scaled_grads, num_workers, num_shards, 'Add', 'Id') else: with tf.name_scope('allreduce'): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) if alg == 'nccl': summed_grads = all_reduce.build_nccl_all_reduce(scaled_grads, tf.add) elif alg == 'xring': summed_grads = all_reduce.build_ring_all_reduce( scaled_grads, num_workers, num_shards, gpu_indices, tf.add) elif alg == 'nccl/xring': summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards, tf.add) elif alg == 'nccl/rechd': summed_grads = all_reduce.build_nccl_then_recursive_hd( scaled_grads, tf.add) elif alg == 'nccl/pscpu': summed_grads = all_reduce.build_nccl_then_shuffle( scaled_grads, aux_devices, tf.add, tf.add_n) elif alg == 'pscpu/pscpu': summed_grads = all_reduce.build_shuffle_then_shuffle( scaled_grads, aux_devices, # TODO(tucker): devise a way of better specifying the device set # for the second level. [aux_devices[0]], tf.add_n) elif alg in ['pscpu', 'psgpu']: summed_grads = all_reduce.build_shuffle_all_reduce( scaled_grads, aux_devices, tf.add_n) else: raise ValueError('unsupported all_reduce alg: ', alg) result = [] for (_, v), g in zip(grad_and_vars, summed_grads): result.append([g, v]) return result def contains_any(haystack, needles): """Tests if any needle is a substring of haystack. Args: haystack: a string needles: list of strings Returns: True if any element of needles is a substring of haystack, False otherwise. """ for n in needles: if n in haystack: return True return False def sum_gradients_all_reduce(single_session, dev_prefixes, tower_grads, num_workers, alg, num_shards, gpu_indices, agg_small_grads_max_bytes=0, agg_small_grads_max_group=10, allreduce_merge_scope=1): """Apply all-reduce algorithm over specified gradient tensors. Args: single_session: true if reduction is applied to one graph across all workers, false if ths application is to a single-worker graph only. dev_prefixes: list of prefix strings to use to generate PS device names. tower_grads: the gradients to reduce. num_workers: number of worker processes across entire job. 
alg: the all-reduce algorithm to apply. num_shards: alg-specific sharding factor. gpu_indices: indices of local GPUs in order usable for ring-reduce. agg_small_grads_max_bytes: largest tensor eligible for aggregation, in number of bytes. agg_small_grads_max_group: largest permitted aggregation of small tensors. allreduce_merge_scope: size of groups into which to partition consecutive gradients grouped under a common 'allreduce' name scope for application of ScopedAllocator optimization. Returns: list of reduced tensors """ alg_contains_shuffle = contains_any(alg, ['pscpu', 'psgpu']) is_hierarchical = '/' in alg if 'pscpu' in alg: aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes] elif 'psgpu' in alg: aux_devices = [ prefix + '/gpu:%d' % i for i in range(len(gpu_indices)) for prefix in dev_prefixes ] else: aux_devices = ['/job:localhost/cpu:0'] aux_device_groups = group_device_names( aux_devices, num_shards if (alg != 'collective' and alg_contains_shuffle) else 1) group_index = 0 if agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0: tower_grads, packing = pack_small_tensors( tower_grads, max_bytes=agg_small_grads_max_bytes, max_group=agg_small_grads_max_group) else: packing = None reduced_gv_list = [] gv = list(zip(*tower_grads)) merge_scope = allreduce_merge_scope if allreduce_merge_scope > 0 else 1 chunked_gv = [gv[x:x + merge_scope] for x in xrange(0, len(gv), merge_scope)] for chunk in chunked_gv: with tf.name_scope('allreduce'): for grad_and_vars in chunk: reduced_gv_list.append(sum_grad_and_var_all_reduce( single_session, grad_and_vars, num_workers, alg, gpu_indices, (aux_devices if is_hierarchical else aux_device_groups[group_index]), num_shards)) group_index = (group_index + 1) % len(aux_device_groups) new_tower_grads = [list(x) for x in zip(*reduced_gv_list)] if packing: new_tower_grads = unpack_small_tensors(new_tower_grads, packing) return new_tower_grads def extract_ranges(index_list, range_size_limit=32): """Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists it will be returned as multiple ranges. Returns: ranges, singles where ranges is a list of [first, last] pairs of consecutive elements in index_list, and singles is all of the other elements, in original order. """ if not index_list: return [], [] first = index_list[0] last = first ranges = [] singles = [] for i in index_list[1:]: if i == last + 1 and (last - first) <= range_size_limit: last = i else: if last > first: ranges.append([first, last]) else: singles.append(first) first = i last = i if last > first: ranges.append([first, last]) else: singles.append(first) return ranges, singles GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes') def pack_range(key, packing, grad_vars, rng): """Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one tower. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors. 
""" to_pack = grad_vars[rng[0]:rng[1] + 1] members = [] variables = [] restore_shapes = [] with tf.name_scope('pack'): for g, v in to_pack: variables.append(v) restore_shapes.append(g.shape) with tf.device(g.device): members.append(tf.reshape(g, [-1])) packing[key] = GradPackTuple( indices=range(rng[0], rng[1] + 1), vars=variables, shapes=restore_shapes) with tf.device(members[0].device): return tf.concat(members, 0) def unpack_grad_tuple(gv, gpt): """Unpack a previously packed collection of gradient tensors. Args: gv: A (grad, var) pair to be unpacked. gpt: A GradPackTuple describing the packing operation that produced gv. Returns: A list of (grad, var) pairs corresponding to the values that were originally packed into gv, maybe following subsequent operations like reduction. """ elt_widths = [x.num_elements() for x in gpt.shapes] with tf.device(gv[0][0].device): with tf.name_scope('unpack'): splits = tf.split(gv[0], elt_widths) unpacked_gv = [] for idx, s in enumerate(splits): unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx])) return unpacked_gv def pack_small_tensors(tower_grads, max_bytes=0, max_group=0): """Concatenate small gradient tensors together for reduction. Args: tower_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. max_group: Int giving max number of small tensors that may be concatenated into one new tensor. Returns: new_tower_grads, packing where new_tower_grads is identical to tower_grads except that all feasible small_tensors have been removed from their places and concatenated into larger tensors that are now in the front of the list for each tower, and packing contains the data necessary to restore the tower_grads structure. Look through the first tower for gradients of the same type (float), and small size, that are all sequential. For each such group, replace by a new tensor that is a flattened concatenation. Note that the corresponding variable will be absent, which doesn't matter because it isn't used during all-reduce. Requires: Every gv_list in towers must have isomorphic structure including identical tensor sizes and types. """ small_indices = [] large_indices = [] for idx, (g, _) in enumerate(tower_grads[0]): if g.dtype == tf.float32 and (4 * g.shape.num_elements()) <= max_bytes: small_indices.append(idx) else: large_indices.append(idx) small_ranges, small_singles = extract_ranges( small_indices, range_size_limit=max_group) large_indices = sorted(large_indices + small_singles) num_gv = len(tower_grads[0]) packing = {} if small_ranges: new_tower_grads = [] for dev_idx, gv_list in enumerate(tower_grads): assert len(gv_list) == num_gv new_gv_list = [] for r in small_ranges: key = '%d:%d' % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), 'packing_var_placeholder')) for i in large_indices: new_gv_list.append(gv_list[i]) new_tower_grads.append(new_gv_list) return new_tower_grads, packing else: return tower_grads, None def unpack_small_tensors(tower_grads, packing): """Undo the structure alterations to tower_grads done by pack_small_tensors. Args: tower_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to tower_grads. Returns: new_tower_grads: identical to tower_grads except that concatentations of small tensors have been split apart and returned to their original positions, paired with their original variables. 
""" if not packing: return tower_grads new_tower_grads = [] num_devices = len(tower_grads) num_packed = len(packing.keys()) // num_devices for dev_idx, gv_list in enumerate(tower_grads): new_gv_list = gv_list[num_packed:] for i in xrange(0, num_packed): k = '%d:%d' % (dev_idx, i) gpt = packing[k] gv = unpack_grad_tuple(gv_list[i], gpt) for gi, idx in enumerate(gpt.indices): assert idx == gpt.indices[gi] new_gv_list.insert(idx, gv[gi]) new_tower_grads.append(new_gv_list) return new_tower_grads
PyTorch/LanguageModeling/BERT/scripts/configs
configs
glue_config
#!/usr/bin/env bash # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e batch_size_and_gradient_accumulation_steps() { batch_size=$((global_batch_size / num_gpu)) gradient_accumulation_steps=1 while [ $((batch_size / gradient_accumulation_steps)) -gt $batch_size_capacity ] do gradient_accumulation_steps=$((gradient_accumulation_steps * 2)) done } commons () { init_checkpoint=/workspace/bert/checkpoints/bert_uncased.pt vocab_file=${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt config_file=/workspace/bert/bert_configs/large.json max_steps=-1.0 } mrpc_commons () { data_dir=${BERT_PREP_WORKING_DIR}/download/glue/MRPC/ out_dir=/workspace/bert/results/MRPC task_name=mrpc global_batch_size=128 learning_rate=1.8e-5 warmup_proportion=0.3 epochs=3 } sst-2_commons () { data_dir=${BERT_PREP_WORKING_DIR}/download/glue/SST-2/ out_dir=/workspace/bert/results/SST-2 task_name=sst-2 global_batch_size=1024 learning_rate=1e-5 warmup_proportion=0.1 epochs=3 } dgxa100-80g_fp16_commons () { batch_size_capacity=256 precision=fp16 } dgxa100-80g_tf32_commons () { batch_size_capacity=128 precision=tf32 } dgx1-32g_fp16_commons () { batch_size_capacity=128 precision=fp16 } dgx1-32g_fp32_commons () { batch_size_capacity=64 precision=fp32 } print_arguments_in_order () { echo \ $init_checkpoint \ $data_dir \ $vocab_file \ $config_file \ $out_dir \ $task_name \ $num_gpu \ $batch_size \ $gradient_accumulation_steps \ $learning_rate \ $warmup_proportion \ $epochs \ $max_steps \ $precision } ########################################## # DGXA100 80G # ########################################## ########################## # MRPC # ########################## # AMP mrpc_dgxa100-80g_1gpu_fp16 () { commons mrpc_commons dgxa100-80g_fp16_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_2gpu_fp16 () { commons mrpc_commons dgxa100-80g_fp16_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_4gpu_fp16 () { commons mrpc_commons dgxa100-80g_fp16_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_8gpu_fp16 () { commons mrpc_commons dgxa100-80g_fp16_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } # TF32 mrpc_dgxa100-80g_1gpu_tf32 () { commons mrpc_commons dgxa100-80g_tf32_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_2gpu_tf32 () { commons mrpc_commons dgxa100-80g_tf32_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_4gpu_tf32 () { commons mrpc_commons dgxa100-80g_tf32_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgxa100-80g_8gpu_tf32 () { commons mrpc_commons dgxa100-80g_tf32_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } ########################## 
# SST-2 # ########################## # AMP sst-2_dgxa100-80g_1gpu_fp16 () { commons sst-2_commons dgxa100-80g_fp16_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_2gpu_fp16 () { commons sst-2_commons dgxa100-80g_fp16_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_4gpu_fp16 () { commons sst-2_commons dgxa100-80g_fp16_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_8gpu_fp16 () { commons sst-2_commons dgxa100-80g_fp16_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } # TF32 sst-2_dgxa100-80g_1gpu_tf32 () { commons sst-2_commons dgxa100-80g_tf32_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_2gpu_tf32 () { commons sst-2_commons dgxa100-80g_tf32_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_4gpu_tf32 () { commons sst-2_commons dgxa100-80g_tf32_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgxa100-80g_8gpu_tf32 () { commons sst-2_commons dgxa100-80g_tf32_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } ########################################## # DGX1 32G # ########################################## ########################## # MRPC # ########################## # AMP mrpc_dgx1-32g_1gpu_fp16 () { commons mrpc_commons dgx1-32g_fp16_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_2gpu_fp16 () { commons mrpc_commons dgx1-32g_fp16_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_4gpu_fp16 () { commons mrpc_commons dgx1-32g_fp16_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_8gpu_fp16 () { commons mrpc_commons dgx1-32g_fp16_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } # FP32 mrpc_dgx1-32g_1gpu_fp32 () { commons mrpc_commons dgx1-32g_fp32_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_2gpu_fp32 () { commons mrpc_commons dgx1-32g_fp32_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_4gpu_fp32 () { commons mrpc_commons dgx1-32g_fp32_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } mrpc_dgx1-32g_8gpu_fp32 () { commons mrpc_commons dgx1-32g_fp32_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } ########################## # SST-2 # ########################## # AMP sst-2_dgx1-32g_1gpu_fp16 () { commons sst-2_commons dgx1-32g_fp16_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_2gpu_fp16 () { commons sst-2_commons dgx1-32g_fp16_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_4gpu_fp16 () { commons sst-2_commons dgx1-32g_fp16_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_8gpu_fp16 () { commons sst-2_commons dgx1-32g_fp16_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order } # FP32 sst-2_dgx1-32g_fp32_commons () { global_batch_size=512 } sst-2_dgx1-32g_1gpu_fp32 () { commons sst-2_commons dgx1-32g_fp32_commons 
sst-2_dgx1-32g_fp32_commons num_gpu=1 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_2gpu_fp32 () { commons sst-2_commons dgx1-32g_fp32_commons sst-2_dgx1-32g_fp32_commons num_gpu=2 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_4gpu_fp32 () { commons sst-2_commons dgx1-32g_fp32_commons sst-2_dgx1-32g_fp32_commons num_gpu=4 batch_size_and_gradient_accumulation_steps print_arguments_in_order } sst-2_dgx1-32g_8gpu_fp32 () { commons sst-2_commons dgx1-32g_fp32_commons sst-2_dgx1-32g_fp32_commons num_gpu=8 batch_size_and_gradient_accumulation_steps print_arguments_in_order }
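The batch_size_and_gradient_accumulation_steps helper above splits the global batch across GPUs and doubles gradient accumulation until the per-GPU micro-batch fits the card's capacity. The same arithmetic as a small Python sketch (the example values mirror the SST-2 and MRPC presets above):

def micro_batch_and_accumulation(global_batch_size, num_gpu, batch_size_capacity):
    # Per-GPU batch, then power-of-two accumulation until the micro-batch fits.
    batch_size = global_batch_size // num_gpu
    gradient_accumulation_steps = 1
    while batch_size // gradient_accumulation_steps > batch_size_capacity:
        gradient_accumulation_steps *= 2
    return batch_size, gradient_accumulation_steps

# SST-2 on one DGX A100 80G GPU with AMP: global batch 1024, capacity 256.
print(micro_batch_and_accumulation(1024, 1, 256))  # (1024, 4)
# MRPC on eight GPUs: global batch 128, capacity 256, no accumulation needed.
print(micro_batch_and_accumulation(128, 8, 256))   # (16, 1)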
Tools/PyTorch/TimeSeriesPredictionPlatform/data
data
datasets
# Copyright 2021-2022 NVIDIA Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os import pickle from bisect import bisect import dgl import numpy as np import pandas as pd import torch from data.data_utils import InputTypes, DataTypes, FEAT_NAMES, FEAT_ORDER, DTYPE_MAP, translate_features import dgl from dgl.transform import metis_partition_assignment from torch.utils.data import Dataset from torch.utils.data.dataloader import default_collate from data.xgb_util import load_xgb_df, feat_adder, data_label_split, select_test_group, target_shift, \ xgb_multiID_preprocess from bisect import bisect from data.data_utils import InputTypes, DataTypes, FEAT_NAMES, FEAT_ORDER, DTYPE_MAP, translate_features, group_ids class TSBaseDataset(Dataset): def __init__(self, features, df, encoder_length, example_length, stride=1, **kwargs): super().__init__() assert example_length > encoder_length self.features = features self.encoder_length = encoder_length self.example_length = example_length self.stride = stride self.df = df self.load() self.features = [i for i in self.features if i.feature_type != InputTypes.TIME] self.feature_type_col_map = [ [i for i, f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x] for x in FEAT_ORDER ] def load(self): raise NotImplementedError class TSDataset(TSBaseDataset): def __init__(self, features, df=None, encoder_length=52, example_length=54, stride=1, **kwargs): super().__init__(features, df, encoder_length, example_length, stride) self.grouped = [x for x in self.grouped if x.shape[0] >= self.example_length] self.group_lens = [(g.shape[0] - self.example_length + 1) // self.stride for g in self.grouped] self._cum_examples_in_group = np.cumsum(self.group_lens) self.grouped = [ [ arr[:, idxs].view(dtype=np.float32).astype(DTYPE_MAP[t[1]]) for t, idxs in zip(FEAT_ORDER, self.feature_type_col_map) ] for arr in self.grouped ] def load(self): if isinstance(self.df, pd.DataFrame): data = self.df else: data = pd.read_csv(self.df, index_col=0) self.grouped = group_ids(data, self.features) def get_probabilities(self): sampled = [] for i in range(len(self.grouped)): group_len = self.group_lens[i] group = self.grouped[i] sample_weights = group[-1] sampled.append(sample_weights[np.arange(0, self.stride * group_len, self.stride)]) sampled = np.concatenate(sampled) return sampled def __len__(self): return self._cum_examples_in_group[-1] def __getitem__(self, idx): g_idx = 
bisect(self._cum_examples_in_group, idx) e_idx = idx - self._cum_examples_in_group[g_idx - 1] if g_idx else idx group = self.grouped[g_idx] tensors = [ torch.from_numpy(feat[e_idx * self.stride: e_idx * self.stride + self.example_length]) if feat.size else torch.empty(0) for feat in group ] out = dict(zip(FEAT_NAMES, tensors)) out["id"] = out["id"][0, :] return out class TSBinaryDataset(TSDataset): def load(self): if isinstance(self.df, pd.DataFrame): data = self.df self.grouped = group_ids(data, self.features) else: self.grouped = pickle.load(open(self.df, "rb")) class TSMultiIDDatasetBase(TSBaseDataset): def __init__(self, features, df=None, encoder_length=52, example_length=54, stride=1, collumns_to_collapse=None, **kwargs ): super().__init__(features, df, encoder_length, example_length, stride) # This part is tricky: we want to do this only for training dataset and then apply the same changes to valid and test splits to maintain coherence. # We can't do this in the preprocessing step because many different dataset classes rely on the same csv file. Thus the first time dataset is created # if we pass empty list of collumns to collapse and populate it here. This list is a part for common argument set for the train, valid and test splits # so is maintained throughout construction of all the splits. if collumns_to_collapse is not None: if not collumns_to_collapse: for name, df in self.tables.items(): if df.eq(df.iloc[:, 0], axis=0).all().all(): self.tables[name] = df.iloc[:, :1] collumns_to_collapse.append(name) # Append dummy value to indicate that this this operation has already been performed # This alleviates an edge case in which in train split we don't collapse any collumns and then we pass an empty list allowing collapse of # collumns in valid and test sets. 
collumns_to_collapse.append(None) else: for name in collumns_to_collapse: if name is not None: self.tables[name] = self.tables[name].iloc[:, :1] self.data = {} for fname, ftype in zip(FEAT_NAMES, FEAT_ORDER): names = [f.name for f in self.features if (f.feature_type, f.feature_embed_type) == ftype] if names: df = pd.concat([v for k,v in self.tables.items() if k in names], axis=1) self.data[fname] = df.values.astype(dtype=DTYPE_MAP[ftype[1]]) else: self.data[fname] = None del self.tables self._n_timeslices = (next(len(df) for df in self.data.values() if df is not None) - self.example_length + 1) // self.stride def load(self): time_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.TIME) id_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.ID) if isinstance(self.df, pd.DataFrame): data = self.df else: data = pd.read_csv(self.df, index_col=0) self.tables = {} for f in self.features: self.tables[f.name] = data.pivot(index=time_col_name, columns=id_col_name, values=f.name) class TSMultiTargetDataset(TSMultiIDDatasetBase): def __len__(self): return self._n_timeslices def __getitem__(self, idx): if idx < 0: idx = idx + len(self) if idx >= len(self) or idx < 0: raise IndexError out = { k: torch.from_numpy(v[idx * self.stride : idx * self.stride + self.example_length]) if v is not None else torch.empty(0) for k,v in self.data.items() } return out class TSMultiIDDataset(TSMultiIDDatasetBase): def __init__(self, features, df=None, encoder_length=52, example_length=54, stride=1, collumns_to_collapse=None, **kwargs): super().__init__(features, df, encoder_length, example_length, stride, collumns_to_collapse) def __len__(self): return self._n_timeslices * self.data['id'].shape[1] def __getitem__(self, idx): g_idx = idx // self._n_timeslices e_idx = idx - g_idx * self._n_timeslices targets = torch.from_numpy(self.data['target'][e_idx * self.stride : e_idx * self.stride + self.example_length]) out = { k: torch.from_numpy(v[e_idx * self.stride : e_idx * self.stride + self.example_length, :]) if v is not None else torch.empty(0) for k,v in self.data.items() } out['o_cont'] = torch.cat([out['o_cont'], targets], dim=-1) out['s_cat'] = out['s_cat'][:, g_idx].unsqueeze(1) if out['s_cat'].numel() else out['s_cat'] out['s_cont'] = out['s_cont'][:, g_idx].unsqueeze(1) if out['s_cont'].numel() else out['s_cont'] out['id'] = out['id'][:, g_idx] out['target'] = out['target'][:, g_idx].unsqueeze(1) out['weight'] = out['weight'][:, g_idx].unsqueeze(1) if out['weight'].numel() else out['weight'] return out class StatDataset(Dataset): def __init__(self, features, path_stat, df=None, encoder_length=52, example_length=54, stride=1, split=None, split_feature=None, ds_type=None): self.ds_type = ds_type if ds_type == "valid": return super().__init__() assert example_length > encoder_length, "Length of example longer than encoder length" assert split, "Split not given" assert ds_type in ["train", "test"] self.features = features self.time_feature = split_feature self.weight_features = [feature.name for feature in self.features if feature.feature_type == InputTypes.WEIGHT] self.encoder_length = encoder_length self.example_length = example_length self.horizon = self.example_length - self.encoder_length self.stride = stride self.split = split self.id_col_name = next(x.name for x in self.features if x.feature_type == InputTypes.ID) self.col_dtypes = {v.name: DTYPE_MAP[v.feature_embed_type] for v in self.features} if isinstance(df, pd.DataFrame): self.data = 
df.astype(self.col_dtypes) else: self.data = pd.read_csv(os.path.join(path_stat, "full.csv"), dtype=self.col_dtypes) self.data = self.data.groupby(self.id_col_name).filter(lambda group: len(group) >= self.example_length) self.grouped = list(self.data.groupby(self.id_col_name)) self.endog = [feature.name for feature in self.features if feature.feature_type == InputTypes.TARGET] self.exog = [ feature.name for feature in self.features if feature.feature_type in [InputTypes.KNOWN, InputTypes.OBSERVED, InputTypes.STATIC] and feature.feature_embed_type == DataTypes.CONTINUOUS ] self.grouped = [group[1] for group in self.grouped] self.grouped = [ group for group in self.grouped if len(group[group[self.time_feature] <= self.split]) >= self.encoder_length and len(group[group[self.time_feature] > self.split]) >= self.horizon ] self._cum_examples_in_group = np.cumsum( [(len(group[group[self.time_feature] > split]) - self.horizon) // self.stride + 1 for group in self.grouped] ) def __len__(self): if self.ds_type == "valid": raise ValueError return self._cum_examples_in_group[-1] def __getitem__(self, idx): if self.ds_type == "valid": raise ValueError if idx > self._cum_examples_in_group[-1]: raise StopIteration g_idx = bisect(self._cum_examples_in_group, idx) e_idx = idx - self._cum_examples_in_group[g_idx - 1] if g_idx else idx group = self.grouped[g_idx] test = group[group[self.time_feature] > self.split] if self.ds_type == "test": test_slice = test[self.stride * e_idx: self.stride * e_idx + self.horizon] test_out = {"endog": test_slice[self.endog], "exog": test_slice[self.exog], "id": test_slice[self.id_col_name]} if len(self.weight_features): test_out["weight"] = test_slice[self.weight_features] return test_out else: train = group[group[self.time_feature] <= self.split] if (self.encoder_length - self.stride * e_idx) > 0: train_slice = train[-(self.encoder_length - self.stride * e_idx):].append( test[max(0, self.stride * e_idx - self.encoder_length): self.stride * e_idx] ) else: train_slice = test[max(0, self.stride * e_idx - self.encoder_length): self.stride * e_idx] train_out = {"endog": train_slice[self.endog], "exog": train_slice[self.exog]} return train_out class XGBDataset(Dataset): def __init__(self, df, path_xgb, features_xgb, lag_features, moving_average_features, example_length, encoder_length, time_series_count, MultiID, ds_type, **kwargs): self.ds_type = ds_type features = features_xgb dest_path = df if isinstance(df, pd.DataFrame) else path_xgb self.encoder_length = encoder_length self.example_length = example_length lag_features_conf = lag_features self.lag_features = {} for feat in lag_features_conf: assert feat.get("min_value", None) is not None or feat.get("value", None) is not None if feat.get("min_value", None) is not None: assert feat.get("max_value", None) is not None and feat.get("min_value") > 0 and feat.get( "max_value") > feat.get("min_value") self.lag_features[feat.name] = list(range(feat.get("min_value"), feat.get("max_value") + 1)) else: self.lag_features[feat.name] = list(feat.value) moving_average_features_conf = moving_average_features self.moving_average_features = {} for feat in moving_average_features_conf: assert feat.get("window_size", None) is not None self.moving_average_features[feat.name] = self.moving_average_features.get(feat.name, []) + [ feat.window_size] self.horizon = example_length - encoder_length self.target = [feature.name for feature in features if feature.feature_type == "TARGET"] self.observed = [feature.name for feature in features if 
feature.feature_type == "OBSERVED"] self.known = [feature.name for feature in features if feature.feature_type in ["KNOWN", "STATIC"]] assert len(self.target) == 1, "Only 1 target feature is currently supported with xgboost" self.data = load_xgb_df(dest_path, features, ds_type) self.extra_columns = [[f'{k}_{i}' for i in v] for k, v in self.lag_features.items()] if MultiID: target = self.target[0] lag_target_value = self.lag_features.pop(target, []) for i in range(time_series_count): self.lag_features[f'{target}_{i}'] = lag_target_value self.moving_average_features[f'{target}_{i}'] = self.moving_average_features.pop(target, []) self.data = xgb_multiID_preprocess(self.data, features, time_series_count) # XXX need to work with self.data = feat_adder(self.data, self.lag_features, self.moving_average_features) def __getitem__(self, idx): if idx >= self.horizon: raise StopIteration data_step = self.data.copy() data_step = target_shift(data_step, self.target, self.known, idx) if self.ds_type == 'test': data_step = select_test_group(data_step, self.encoder_length, self.example_length) labels = data_label_split(data_step, [f'{i}_target' for i in self.target]) return data_step, labels def __len__(self): return self.horizon class ClusteredGraphDataset(Dataset): def __init__(self, graph, graph_partitions=10, partition_joining_coef=2, **kwargs): if isinstance(graph, str): self.graph = pickle.load(open(graph, "rb")) else: self.graph = graph assert isinstance(graph_partitions, int) and graph_partitions > 0 assert partition_joining_coef <= graph_partitions self.part_count = graph_partitions if graph_partitions > 1: self.partition = metis_partition_assignment(self.graph, self.part_count) else: self.partition = torch.zeros(self.graph.num_nodes(), dtype=torch.int64) self.joining_coef = partition_joining_coef def __len__(self): return math.comb(self.part_count, self.joining_coef) def __getitem__(self, idx): indicator = self.idx_to_combination(self.part_count, self.joining_coef, idx) c_ids = np.nonzero(indicator)[0] subgraph = self.get_subgraph(c_ids) return subgraph def get_subgraph(self, c_ids): ids = sum([self.partition == i for i in c_ids]).bool() return self.graph.subgraph(ids) def idx_to_combination(self, n, r, m): """ n: int total number of elements r: int number of elements in combination m: int 0-based index of combination in reverse-lexicographic order Returns list - indicator vector of chosen elements """ assert m < math.comb(n, r), "Index out of range" out = [0] * n while n > 0: if n > r and r >= 0: y = math.comb(n - 1, r) else: y = 0 if m >= y: m -= y out[n - 1] = 1 r -= 1 n -= 1 return out class TemporalClusteredGraphDataset(ClusteredGraphDataset): def __init__(self, features, graph, df=None, encoder_length=52, example_length=54, stride=1, **kwargs): super().__init__(graph, **kwargs) assert example_length > encoder_length self.features = [i for i in features if i.feature_type != InputTypes.TIME] self.encoder_length = encoder_length self.example_length = example_length self.stride = stride self.df = df self.feature_type_col_map = [ np.array([i for i, f in enumerate(self.features) if (f.feature_type, f.feature_embed_type) == x]) for x in FEAT_ORDER ] if isinstance(df, pd.DataFrame): data = self.df grouped = group_ids(data, self.features) else: grouped = pickle.load(open(self.df, "rb")) # We assume that all the time series are of the same length and have the same set of features assert all([x.shape == grouped[0].shape for x in grouped]) ndata = np.stack(grouped) self.ndata = { name: ndata[:, :, 
ids].view(dtype=np.float32).astype(DTYPE_MAP[f[1]]) if not ids.size == 0 else np.empty((*ndata.shape[:-1], 0)) for name, f, ids in zip(FEAT_NAMES, FEAT_ORDER, self.feature_type_col_map) } self.t_dim = ndata.shape[1] self.n_timeslices = (self.t_dim - self.example_length + 1) // self.stride def __len__(self): # the number of possible subgraphs times the number of possible time slices return super().__len__() * self.n_timeslices def __getitem__(self, idx): g_idx = idx // self.n_timeslices t_idx = idx - g_idx * self.n_timeslices subgraph = super().__getitem__(g_idx) node_ids = np.array(subgraph.ndata["_ID"]) for k, v in self.ndata.items(): subgraph.ndata[k] = torch.from_numpy( v[node_ids, t_idx * self.stride: t_idx * self.stride + self.example_length, :] ) return subgraph def create_datasets(config, input_df=None): def select_dataset_class(config): binarized = config.get("binarized", False) graph_dataset = config.get("construct_graph", False) multi_id_dataset = config.get("MultiID", False) single_target = config.get('single_target', False) if config.get("xgb", False): specific_args = { "path_xgb": config.dest_path, "features_xgb": config.features, "lag_features": config.get("lag_features", []), "moving_average_features": config.get("moving_average_features", []), "time_series_count": config.time_series_count, "MultiID": config.get("MultiID", False) } return XGBDataset, specific_args if config.get("stat", False): specific_args = { "path_stat": config.dest_path, "split": config.test_range[0], "split_feature": config.time_ids } return StatDataset, specific_args if binarized and graph_dataset: specific_args = { "graph": os.path.join(config.dest_path, "graph.bin"), "graph_partitions": config.graph_partitions, "partition_joining_coef": config.partition_joining_coef, } return TemporalClusteredGraphDataset, specific_args elif binarized and multi_id_dataset: raise NotImplementedError elif binarized: return TSBinaryDataset, {} elif not binarized and graph_dataset: raise NotImplementedError elif not binarized and multi_id_dataset and not single_target: specific_args = {} if config.get('collapse_identical_columns', False): specific_args['collumns_to_collapse'] = [] return TSMultiTargetDataset, specific_args elif not binarized and multi_id_dataset and single_target: specific_args = {} if config.get('collapse_identical_columns', False): specific_args['collumns_to_collapse'] = [] return TSMultiIDDataset, specific_args else: return TSDataset, {} common_args = { "features": translate_features(config.features), "encoder_length": config.encoder_length, "example_length": config.example_length, "stride": config.get("stride", 1), } dataset_class, specific_args = select_dataset_class(config) if input_df is not None: print("Input DataFrame provided to create_datasets functions") print("Warning: Please make sure the dataframe is preprocessed") test = dataset_class(df=input_df, **common_args, **specific_args, ds_type='test') train = None valid = None else: path_template = os.path.join(config.dest_path, "{{subset}}.{extension}") path_template = path_template.format(extension="bin" if config.get("binarized", False) else "csv") train = dataset_class(df=path_template.format(subset="train"), **common_args, **specific_args, ds_type="train") valid = dataset_class(df=path_template.format(subset="valid"), **common_args, **specific_args, ds_type="valid") test = dataset_class(df=path_template.format(subset="test"), **common_args, **specific_args, ds_type="test") if not (config.get("xgb", False) or config.get("stat", False)): 
train = sample_data(train, config.get("train_samples", -1)) valid = sample_data(valid, config.get("valid_samples", -1)) return train, valid, test def sample_data(dataset, num_samples): if num_samples < 0: return dataset else: return torch.utils.data.Subset(dataset, np.random.choice(np.arange(len(dataset)), size=num_samples, replace=False)) def get_collate_fn(model_type, encoder_length, test=False): allowed_types = ['default', 'graph', 'autoregressive'] if model_type not in allowed_types: raise ValueError(f'Model type has to be one of {allowed_types}') def collate_graph(samples): """A collater used for GNNs""" batch = dgl.batch(samples) labels = batch.ndata["target"][:, encoder_length:, :] weights = batch.ndata['weight'] if weights is not None and weights.numel(): weights = weights[:, encoder_length :, :] return batch, labels, weights def collate_ar(samples): batch = default_collate(samples) labels = batch["target"] weights = batch['weight'] return batch, labels, weights def collate_dict(samples): """Default TSPP collater""" batch = default_collate(samples) labels = batch["target"][:, encoder_length:, :] weights = batch['weight'] if weights is not None and weights.numel(): weights = weights[:, encoder_length:, :] return batch, labels, weights if model_type == 'graph': return collate_graph elif model_type == 'autoregressive' and not test: return collate_ar else: return collate_dict
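As a quick illustration of how the pieces above fit together, a minimal sketch (not part of TSPP itself; the dataset object and batch size are assumptions) of pairing get_collate_fn with a PyTorch DataLoader:

from torch.utils.data import DataLoader

# Minimal sketch, assuming `train_dataset` is an instance of one of the dataset
# classes above and `encoder_length` matches the config it was built from.
def make_train_loader(train_dataset, encoder_length, batch_size=64):
    collate_fn = get_collate_fn('default', encoder_length, test=False)
    return DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                      collate_fn=collate_fn)

# Each yielded batch unpacks into (inputs, labels, weights); labels and weights
# are already restricted to the decoder horizon (time steps past encoder_length).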
PyTorch/LanguageModeling/BART/utils
utils
optimization
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math from typing import Callable, Iterable, Tuple import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from utils import logging logger = logging.get_logger(__name__) def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`int`, `optional`, defaults to 1): The number of hard restarts to use. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_polynomial_decay_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ): """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `lr_end`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. 
Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. lr_end (:obj:`float`, `optional`, defaults to 1e-7): The end LR. power (:obj:`float`, `optional`, defaults to 1.0): Power factor. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Note: `power` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_init = optimizer.defaults["lr"] assert lr_init > lr_end, f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining ** power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__. Parameters: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`, defaults to 1e-3): The learning rate to use. betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)): Adam's betas parameters (b1, b2). eps (:obj:`float`, `optional`, defaults to 1e-6): Adam's epsilon for numerical stability. weight_decay (:obj:`float`, `optional`, defaults to 0): Decoupled weight decay to apply. correct_bias (:obj:`bool`, `optional`, defaults to `True`): Whether ot not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`). """ def __init__( self, params: Iterable[torch.nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, ): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super().__init__(params, defaults) def step(self, closure: Callable = None): """ Performs a single optimization step. Arguments: closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"]) return loss class Adafactor(Optimizer): """ AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Paper: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` https://arxiv.org/abs/1804.04235 Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`): The external learning rate. 
eps (:obj:`Tuple[float, float]`, `optional`, defaults to (1e-30, 1e-3)): Regularization constants for square gradient and parameter scale respectively clip_threshold (:obj:`float`, `optional`, defaults 1.0): Threshold of root mean square of final gradient update decay_rate (:obj:`float`, `optional`, defaults to -0.8): Coefficient used to compute running averages of square beta1 (:obj:`float`, `optional`): Coefficient used for computing running averages of gradient weight_decay (:obj:`float`, `optional`, defaults to 0): Weight decay (L2 penalty) scale_parameter (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, learning rate is scaled by root mean square relative_step (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, time-dependent learning rate is computed instead of external learning rate warmup_init (:obj:`bool`, `optional`, defaults to :obj:`False`): Time-dependent learning rate computation depends on whether warm-up initialization is being used This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. Recommended T5 finetuning settings: - Scheduled LR warm-up to fixed LR - disable relative updates - use clip threshold: https://arxiv.org/abs/2004.14546 Example:: Adafactor(model.parameters(), lr=1e-3, relative_step=False, warmup_init=True) - Alternatively, relative_step with warmup_init can be used. - Training without LR warmup or clip threshold is not recommended. Additional optimizer operations like gradient clipping should not be used alongside Adafactor. Usage:: # replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False ) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super().__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) @staticmethod def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_() c_factor = exp_avg_sq_col.rsqrt() return torch.mm(r_factor.unsqueeze(-1), c_factor.unsqueeze(0)) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) group["lr"] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1)) exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2)) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) update.mul_(group["lr"]) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(1 - group["beta1"], update) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/denoiser
denoiser
denoiserLoader
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_DENOISERLOADER_H #define TT2I_DENOISERLOADER_H #include "denoiserInstance.h" #include <memory> #include <string> namespace nvinfer1 { class IBuilder; } namespace tts { class EngineCache; class DenoiserLoader { public: /** * @brief Load a new DenoiserInstance from an engine file or a json file. * * @param cache The engine cache. * @param builder The TensorRT Engine Builder. * @param filename The name of the engine/json file. * @param fp16 If building an engine from a json file, whether or not to * allow fp16 operations. If loading an engine file, this input is ignored. * @param batchSize If building an engine from a json file, the maximum batch * size to support. If loading an engine file, this input is ignored. * * @return The newly created DenoiserInstance. */ static std::shared_ptr<DenoiserInstance> load(EngineCache& cache, nvinfer1::IBuilder& builder, const std::string& filename, bool fp16 = true, int batchSize = 8); }; } // namespace tts #endif
TensorFlow/Detection/SSD/models/research/object_detection/models
models
faster_rcnn_pnas_feature_extractor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """PNASNet Faster R-CNN implementation. Based on PNASNet model: https://arxiv.org/abs/1712.00559 """ import tensorflow as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from nets.nasnet import nasnet_utils from nets.nasnet import pnasnet arg_scope = tf.contrib.framework.arg_scope slim = tf.contrib.slim def pnasnet_large_arg_scope_for_detection(is_batch_norm_training=False): """Defines the default arg scope for the PNASNet Large for object detection. This provides a small edit to switch batch norm training on and off. Args: is_batch_norm_training: Boolean indicating whether to train with batch norm. Returns: An `arg_scope` to use for the PNASNet Large Model. """ imagenet_scope = pnasnet.pnasnet_large_arg_scope() with arg_scope(imagenet_scope): with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc: return sc def _filter_scaling(reduction_indices, start_cell_num): """Compute the expected filter scaling at given PNASNet cell start_cell_num. In the pnasnet.py code, filter_scaling starts at 1.0. We instead adapt filter scaling to depend on the starting cell. At first cells, before any reduction, filter_scalling is 1.0. With passing any reduction cell, the filter_scaling is multiplied by 2. Args: reduction_indices: list of int indices. start_cell_num: int. Returns: filter_scaling: float. """ filter_scaling = 1.0 for ind in reduction_indices: if ind < start_cell_num: filter_scaling *= 2.0 return filter_scaling # Note: This is largely a copy of _build_pnasnet_base inside pnasnet.py but # with special edits to remove instantiation of the stem and the special # ability to receive as input a pair of hidden states. It constructs only # a sub-network from the original PNASNet model, starting from the # start_cell_num cell and with modified final layer. def _build_pnasnet_base( hidden_previous, hidden, normal_cell, hparams, true_cell_num, start_cell_num): """Constructs a PNASNet image model for proposal classifier features.""" # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) filter_scaling = _filter_scaling(reduction_indices, start_cell_num) # Note: The None is prepended to match the behavior of _imagenet_stem() cell_outputs = [None, hidden_previous, hidden] net = hidden # Run the cells for cell_num in range(start_cell_num, hparams.num_cells): is_reduction = cell_num in reduction_indices stride = 2 if is_reduction else 1 if is_reduction: filter_scaling *= hparams.filter_scaling_rate prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) true_cell_num += 1 cell_outputs.append(net) # Final nonlinearity. 
# Note that we have dropped the final pooling, dropout and softmax layers # from the default pnasnet version. with tf.variable_scope('final_layer'): net = tf.nn.relu(net) return net # TODO(shlens): Only fixed_shape_resizer is currently supported for PNASNet # featurization. The reason for this is that pnasnet.py only supports # inputs with fully known shapes. We need to update pnasnet.py to handle # shapes not known at compile time. class FasterRCNNPNASFeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with PNASNet feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 16. """ if first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 16.') super(FasterRCNNPNASFeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with PNAS preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the PNASNet network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] end_points: A dictionary mapping feature extractor tensor names to tensors Raises: ValueError: If the created network is missing the required activation. """ del scope if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(pnasnet_large_arg_scope_for_detection( is_batch_norm_training=self._train_batch_norm)): with arg_scope([slim.conv2d, slim.batch_norm, slim.separable_conv2d], reuse=self._reuse_weights): _, end_points = pnasnet.build_pnasnet_large( preprocessed_inputs, num_classes=None, is_training=self._is_training, final_endpoint='Cell_7') # Note that both 'Cell_6' and 'Cell_7' have equal depth = 2160. # Cell_7 is the last cell before second reduction. rpn_feature_map = tf.concat([end_points['Cell_6'], end_points['Cell_7']], 3) # pnasnet.py does not maintain the batch size in the first dimension. # This work around permits us retaining the batch for below. 
batch = preprocessed_inputs.get_shape().as_list()[0] shape_without_batch = rpn_feature_map.get_shape().as_list()[1:] rpn_feature_map_shape = [batch] + shape_without_batch rpn_feature_map.set_shape(rpn_feature_map_shape) return rpn_feature_map, end_points def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the PNASNet network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ del scope # Number of used stem cells. num_stem_cells = 2 # Note that we always feed into 2 layers of equal depth # where the first N channels corresponds to previous hidden layer # and the second N channels correspond to the final hidden layer. hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3) # Note that what follows is largely a copy of build_pnasnet_large() within # pnasnet.py. We are copying to minimize code pollution in slim. # TODO(shlens,skornblith): Determine the appropriate drop path schedule. # For now the schedule is the default (1.0->0.7 over 250,000 train steps). hparams = pnasnet.large_imagenet_config() if not self._is_training: hparams.set_hparam('drop_path_keep_prob', 1.0) # Calculate the total number of cells in the network total_num_cells = hparams.num_cells + num_stem_cells normal_cell = pnasnet.PNasNetNormalCell( hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps) with arg_scope([slim.dropout, nasnet_utils.drop_path], is_training=self._is_training): with arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): # This corresponds to the cell number just past 'Cell_7' used by # _extract_proposal_features(). start_cell_num = 8 true_cell_num = start_cell_num + num_stem_cells with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()): net = _build_pnasnet_base( hidden_previous, hidden, normal_cell=normal_cell, hparams=hparams, true_cell_num=true_cell_num, start_cell_num=start_cell_num) proposal_classifier_features = net return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for PNASNet checkpoints. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. 
""" variables_to_restore = {} for variable in tf.global_variables(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/', '') var_name += '/ExponentialMovingAverage' variables_to_restore[var_name] = variable return variables_to_restore
PyTorch/Classification/GPUNet/triton/225ms-D/runner
runner
config_NVIDIA-DGX-1-(1x-V100-32GB)
batching: dynamic
checkpoints:
- name: 2.25ms-D
  url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d2_pyt_ckpt/versions/21.12.0_amp/zip
configurations:
- checkpoint: 2.25ms-D
  parameters:
    backend_accelerator: trt
    checkpoint: 2.25ms-D
    device_kind: gpu
    export_format: onnx
    export_precision: fp16
    format: onnx
    max_batch_size: 64
    number_of_model_instances: 2
    precision: fp16
    tensorrt_capture_cuda_graph: 0
    torch_jit: none
container_version: '21.12'
datasets:
- name: imagenet
datasets_dir: datasets
ensemble_model_name: null
framework: PyTorch
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: GPUnet
performance_tool: model_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
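A minimal sketch of inspecting such a runner config from Python (the local file name is an assumption; PyYAML is required):

import yaml

# Assumed local copy of the runner config shown above.
with open("config_NVIDIA-DGX-1-(1x-V100-32GB).yaml") as f:
    cfg = yaml.safe_load(f)

params = cfg["configurations"][0]["parameters"]
print(params["precision"], params["max_batch_size"])  # -> fp16 64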
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts
scripts
run_traffic
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

: ${SEED:=1}
: ${LR:=1e-3}
: ${NGPU:=8}
: ${BATCH_SIZE:=1024}
: ${EPOCHS:=20}

python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \
    --dataset traffic \
    --data_path /data/processed/traffic_bin \
    --batch_size=${BATCH_SIZE} \
    --sample 450000 50000 \
    --lr ${LR} \
    --epochs ${EPOCHS} \
    --seed ${SEED} \
    --use_amp \
    --results /results/TFT_traffic_bs${NGPU}x${BATCH_SIZE}_lr${LR}/seed_${SEED}
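The ": ${VAR:=default}" assignments at the top give SEED, LR, NGPU, BATCH_SIZE and EPOCHS environment-overridable defaults, and the results directory name is built from NGPU, BATCH_SIZE, LR and SEED, so runs launched with different settings land in separate folders.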
PyTorch/Classification/ConvNets/efficientnet/training/TF32
TF32
DGXA100_efficientnet-b0_TF32
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b0 --precision TF32 --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
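The single positional argument (${1:-./}) selects the workspace directory and defaults to the current directory; the run summary is requested via --raport-file raport.json.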
PyTorch/SpeechSynthesis/FastPitch/triton/scripts/docker
docker
triton_inference_server
#!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=all}

docker run --rm -d \
    -p 8000:8000 \
    -p 8001:8001 \
    -p 8002:8002 \
    --runtime=nvidia \
    -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \
    -v ${MODEL_REPOSITORY_PATH}:${MODEL_REPOSITORY_PATH} \
    --shm-size=1g \
    --ulimit memlock=-1 \
    --ulimit stack=67108864 \
    nvcr.io/nvidia/tritonserver:21.02-py3 tritonserver \
    --model-store=${MODEL_REPOSITORY_PATH} \
    --exit-on-error=true \
    --model-control-mode=explicit
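Note that MODEL_REPOSITORY_PATH has no default here: it must be set and exported by the caller before running the script, since the same path is both mounted into the container and passed to tritonserver as --model-store. With --model-control-mode=explicit, Triton does not load models at startup; they are loaded on demand through the model control (load/unload) API.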
TensorFlow/LanguageModeling/BERT/scripts
scripts
run_pretraining_lamb
#!/usr/bin/env bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

echo "Container nvidia build = " $NVIDIA_BUILD_ID

train_batch_size_phase1=${1:-64}
train_batch_size_phase2=${2:-8}
eval_batch_size=${3:-8}
learning_rate_phase1=${4:-"7.5e-4"}
learning_rate_phase2=${5:-"5e-4"}
precision=${6:-"fp16"}
use_xla=${7:-"true"}
num_gpus=${8:-8}
warmup_steps_phase1=${9:-"2000"}
warmup_steps_phase2=${10:-"200"}
train_steps=${11:-7820}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-128}
num_accumulation_steps_phase2=${14:-512}
bert_model=${15:-"large"}

DATA_DIR=data
export DATA_DIR=$DATA_DIR

GBS1=$(expr $train_batch_size_phase1 \* $num_gpus \* $num_accumulation_steps_phase1)
GBS2=$(expr $train_batch_size_phase2 \* $num_gpus \* $num_accumulation_steps_phase2)
printf -v TAG "tf_bert_pretraining_lamb_%s_%s_gbs1%d_gbs2%d" "$bert_model" "$precision" $GBS1 $GBS2
DATESTAMP=`date +'%y%m%d%H%M%S'`

#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=${RESULTS_DIR:-/results/${TAG}_${DATESTAMP}}
LOGFILE=$RESULTS_DIR/$TAG.$DATESTAMP.log
mkdir -m 777 -p $RESULTS_DIR
printf "Saving checkpoints to %s\n" "$RESULTS_DIR"
printf "Logs written to %s\n" "$LOGFILE"
export RESULTS_DIR=$RESULTS_DIR

printf -v SCRIPT_ARGS "%d %d %d %e %e %s %s %d %d %d %d %d %d %d %s %s" \
    $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 \
    $learning_rate_phase2 "$precision" "$use_xla" $num_gpus $warmup_steps_phase1 \
    $warmup_steps_phase2 $train_steps $save_checkpoints_steps \
    $num_accumulation_steps_phase1 $num_accumulation_steps_phase2 "$bert_model"

# RUN PHASE 1
bash scripts/run_pretraining_lamb_phase1.sh $SCRIPT_ARGS |& tee -a $LOGFILE

# RUN PHASE 2
bash scripts/run_pretraining_lamb_phase2.sh $SCRIPT_ARGS |& tee -a $LOGFILE
TensorFlow/Detection/SSD/models/research/slim/datasets
datasets
imagenet
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes. Some images have one or more bounding boxes associated with the label of the image. See details here: http://image-net.org/download-bboxes ImageNet is based upon WordNet 3.0. To uniquely identify a synset, we use "WordNet ID" (wnid), which is a concatenation of POS ( i.e. part of speech ) and SYNSET OFFSET of WordNet. For more information, please refer to the WordNet documentation[http://wordnet.princeton.edu/wordnet/documentation/]. "There are bounding boxes for over 3000 popular synsets available. For each synset, there are on average 150 images with bounding boxes." WARNING: Don't use for object detection, in this case all the bounding boxes of the image belong to just one class. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from six.moves import urllib import tensorflow as tf from datasets import dataset_utils slim = tf.contrib.slim # TODO(nsilberman): Add tfrecord file type once the script is updated. _FILE_PATTERN = '%s-*' _SPLITS_TO_SIZES = { 'train': 1281167, 'validation': 50000, } _ITEMS_TO_DESCRIPTIONS = { 'image': 'A color image of varying height and width.', 'label': 'The label id of the image, integer between 0 and 999', 'label_text': 'The text of the label.', 'object/bbox': 'A list of bounding boxes.', 'object/label': 'A list of labels, one per each object.', } _NUM_CLASSES = 1001 # If set to false, will not try to set label_to_names in dataset # by reading them from labels.txt or github. LOAD_READABLE_NAMES = True def create_readable_names_for_imagenet_labels(): """Create a dict mapping label id to human readable string. Returns: labels_to_names: dictionary where keys are integers from to 1000 and values are human-readable names. We retrieve a synset file, which contains a list of valid synset labels used by ILSVRC competition. There is one synset one per line, eg. # n01440764 # n01443537 We also retrieve a synset_to_human_file, which contains a mapping from synsets to human-readable names for every synset in Imagenet. These are stored in a tsv format, as follows: # n02119247 black fox # n02119359 silver fox We assign each synset (in alphabetical order) an integer, starting from 1 (since 0 is reserved for the background class). 
Code is based on https://github.com/tensorflow/models/blob/master/research/inception/inception/data/build_imagenet_data.py#L463 """ # pylint: disable=g-line-too-long base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/inception/inception/data/' synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url) synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url) filename, _ = urllib.request.urlretrieve(synset_url) synset_list = [s.strip() for s in open(filename).readlines()] num_synsets_in_ilsvrc = len(synset_list) assert num_synsets_in_ilsvrc == 1000 filename, _ = urllib.request.urlretrieve(synset_to_human_url) synset_to_human_list = open(filename).readlines() num_synsets_in_all_imagenet = len(synset_to_human_list) assert num_synsets_in_all_imagenet == 21842 synset_to_human = {} for s in synset_to_human_list: parts = s.strip().split('\t') assert len(parts) == 2 synset = parts[0] human = parts[1] synset_to_human[synset] = human label_index = 1 labels_to_names = {0: 'background'} for synset in synset_list: name = synset_to_human[synset] labels_to_names[label_index] = name label_index += 1 return labels_to_names def get_split(split_name, dataset_dir, file_pattern=None, reader=None): """Gets a dataset tuple with instructions for reading ImageNet. Args: split_name: A train/test split name. dataset_dir: The base directory of the dataset sources. file_pattern: The file pattern to use when matching the dataset sources. It is assumed that the pattern contains a '%s' string so that the split name can be inserted. reader: The TensorFlow reader type. Returns: A `Dataset` namedtuple. Raises: ValueError: if `split_name` is not a valid train/test split. """ if split_name not in _SPLITS_TO_SIZES: raise ValueError('split name %s was not recognized.' % split_name) if not file_pattern: file_pattern = _FILE_PATTERN file_pattern = os.path.join(dataset_dir, file_pattern % split_name) # Allowing None in the signature so that dataset_factory can use the default. 
if reader is None: reader = tf.TFRecordReader keys_to_features = { 'image/encoded': tf.FixedLenFeature( (), tf.string, default_value=''), 'image/format': tf.FixedLenFeature( (), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature( [], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature( [], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature( dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature( dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature( dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature( dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature( dtype=tf.int64), } items_to_handlers = { 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox( ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'), } decoder = slim.tfexample_decoder.TFExampleDecoder( keys_to_features, items_to_handlers) labels_to_names = None if LOAD_READABLE_NAMES: if dataset_utils.has_labels(dataset_dir): labels_to_names = dataset_utils.read_label_file(dataset_dir) else: labels_to_names = create_readable_names_for_imagenet_labels() dataset_utils.write_label_file(labels_to_names, dataset_dir) return slim.dataset.Dataset( data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
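A minimal sketch of consuming this dataset definition with the slim data provider (the TFRecord directory is a placeholder, and TF 1.x with tf.contrib is assumed):

# Hypothetical usage; '/data/imagenet-tfrecords' is a placeholder path.
dataset = get_split('validation', '/data/imagenet-tfrecords')
provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset, num_readers=1, shuffle=False)
image, label, label_text = provider.get(['image', 'label', 'label_text'])
# `image` is the decoded image tensor; `label` is an int64 id where 0 is
# reserved for the background class.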
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
timer
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_TIMER_H #define TT2I_TIMER_H #include <cassert> #include <chrono> namespace tts { class Timer { public: using Clock = std::chrono::high_resolution_clock; /** * @brief Create a new timer in a stopped state. */ Timer() : m_total(0), m_start(), m_running(false) { // do nothing } /** * @brief Start the timer. Only stopped timers can be started. */ void start() { assert(!m_running); m_running = true; m_start = std::chrono::high_resolution_clock::now(); } /** * @brief Stop the timer. Only running timers can be stopped. */ void stop() { assert(m_running); m_running = false; const auto elapsed = Clock::now() - m_start; const double seconds = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() / 1000000.0; m_total += seconds; } /** * @brief Get the current duration of the timer. Only stopped timers can be * polled. * * @return */ double poll() const { assert(!m_running); return m_total; } /** * @brief Reset the timer to zero. Running timers will be stopped. */ void reset() { m_running = false; m_total = 0; } private: double m_total; Clock::time_point m_start; bool m_running; }; } // namespace tts #endif
TensorFlow2/Segmentation/Contrib/UNet3P/data_preparation
data_preparation
preprocess_data
""" Convert LiTS 2017 (Liver Tumor Segmentation) data into UNet3+ data format LiTS: https://competitions.codalab.org/competitions/17094 """ import os import sys from glob import glob from pathlib import Path from tqdm import tqdm import numpy as np import multiprocessing as mp import cv2 import nibabel as nib import hydra from omegaconf import DictConfig sys.path.append(os.path.abspath("./")) from utils.general_utils import create_directory, join_paths from utils.images_utils import resize_image def read_nii(filepath): """ Reads .nii file and returns pixel array """ ct_scan = nib.load(filepath).get_fdata() # TODO: Verify images orientation # in both train and test set, especially on train scan 130 ct_scan = np.rot90(np.array(ct_scan)) return ct_scan def crop_center(img, croph, cropw): """ Center crop on given height and width """ height, width = img.shape[:2] starth = height // 2 - (croph // 2) startw = width // 2 - (cropw // 2) return img[starth:starth + croph, startw:startw + cropw, :] def linear_scale(img): """ First convert image to range of 0-1 and them scale to 255 """ img = (img - img.min(axis=(0, 1))) / (img.max(axis=(0, 1)) - img.min(axis=(0, 1))) return img * 255 def clip_scan(img, min_value, max_value): """ Clip scan to given range """ return np.clip(img, min_value, max_value) def resize_scan(scan, new_height, new_width, scan_type): """ Resize CT scan to given size """ scan_shape = scan.shape resized_scan = np.zeros((new_height, new_width, scan_shape[2]), dtype=scan.dtype) resize_method = cv2.INTER_CUBIC if scan_type == "image" else cv2.INTER_NEAREST for start in range(0, scan_shape[2], scan_shape[1]): end = start + scan_shape[1] if end >= scan_shape[2]: end = scan_shape[2] resized_scan[:, :, start:end] = resize_image( scan[:, :, start:end], new_height, new_width, resize_method ) return resized_scan def save_images(scan, save_path, img_index): """ Based on UNet3+ requirement "input image had three channels, including the slice to be segmented and the upper and lower slices, which was cropped to 320×320" save each scan as separate image with previous and next scan concatenated. 
""" scan_shape = scan.shape for index in range(scan_shape[-1]): before_index = index - 1 if (index - 1) > 0 else 0 after_index = index + 1 if (index + 1) < scan_shape[-1] else scan_shape[-1] - 1 new_img_path = join_paths(save_path, f"image_{img_index}_{index}.png") new_image = np.stack( ( scan[:, :, before_index], scan[:, :, index], scan[:, :, after_index] ) , axis=-1) new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR) # RGB to BGR cv2.imwrite(new_img_path, new_image) # save the images as .png def save_mask(scan, save_path, mask_index): """ Save each scan as separate mask """ for index in range(scan.shape[-1]): new_mask_path = join_paths(save_path, f"mask_{mask_index}_{index}.png") cv2.imwrite(new_mask_path, scan[:, :, index]) # save grey scale image def extract_image(cfg, image_path, save_path, scan_type="image", ): """ Extract image from given scan path """ _, index = str(Path(image_path).stem).split("-") scan = read_nii(image_path) scan = resize_scan( scan, cfg.DATA_PREPARATION.RESIZED_HEIGHT, cfg.DATA_PREPARATION.RESIZED_WIDTH, scan_type ) if scan_type == "image": scan = clip_scan( scan, cfg.DATA_PREPARATION.SCAN_MIN_VALUE, cfg.DATA_PREPARATION.SCAN_MAX_VALUE ) scan = linear_scale(scan) scan = np.uint8(scan) save_images(scan, save_path, index) else: # 0 for background/non-lesion, 1 for liver, 2 for lesion/tumor # merging label 2 into label 1, because lesion/tumor is part of liver scan = np.where(scan != 0, 1, scan) # scan = np.where(scan==2, 1, scan) scan = np.uint8(scan) save_mask(scan, save_path, index) def extract_images(cfg, images_path, save_path, scan_type="image", ): """ Extract images paths using multiprocessing and pass to extract_image function for further processing . """ # create pool process_count = np.clip(mp.cpu_count() - 2, 1, 20) # less than 20 workers pool = mp.Pool(process_count) for image_path in tqdm(images_path): pool.apply_async(extract_image, args=(cfg, image_path, save_path, scan_type), ) # close pool pool.close() pool.join() @hydra.main(version_base=None, config_path="../configs", config_name="config") def preprocess_lits_data(cfg: DictConfig): """ Preprocess LiTS 2017 (Liver Tumor Segmentation) data by extractions images and mask into UNet3+ data format """ train_images_names = glob( join_paths( cfg.WORK_DIR, cfg.DATA_PREPARATION.SCANS_TRAIN_DATA_PATH, "volume-*.nii" ) ) train_mask_names = glob( join_paths( cfg.WORK_DIR, cfg.DATA_PREPARATION.SCANS_TRAIN_DATA_PATH, "segmentation-*.nii" ) ) assert len(train_images_names) == len(train_mask_names), \ "Train volumes and segmentations are not same in length" val_images_names = glob( join_paths( cfg.WORK_DIR, cfg.DATA_PREPARATION.SCANS_VAL_DATA_PATH, "volume-*.nii" ) ) val_mask_names = glob( join_paths( cfg.WORK_DIR, cfg.DATA_PREPARATION.SCANS_VAL_DATA_PATH, "segmentation-*.nii" ) ) assert len(val_images_names) == len(val_mask_names), \ "Validation volumes and segmentations are not same in length" train_images_names = sorted(train_images_names) train_mask_names = sorted(train_mask_names) val_images_names = sorted(val_images_names) val_mask_names = sorted(val_mask_names) train_images_path = join_paths( cfg.WORK_DIR, cfg.DATASET.TRAIN.IMAGES_PATH ) train_mask_path = join_paths( cfg.WORK_DIR, cfg.DATASET.TRAIN.MASK_PATH ) val_images_path = join_paths( cfg.WORK_DIR, cfg.DATASET.VAL.IMAGES_PATH ) val_mask_path = join_paths( cfg.WORK_DIR, cfg.DATASET.VAL.MASK_PATH ) create_directory(train_images_path) create_directory(train_mask_path) create_directory(val_images_path) create_directory(val_mask_path) 
print("\nExtracting train images") extract_images( cfg, train_images_names, train_images_path, scan_type="image" ) print("\nExtracting train mask") extract_images( cfg, train_mask_names, train_mask_path, scan_type="mask" ) print("\nExtracting val images") extract_images( cfg, val_images_names, val_images_path, scan_type="image" ) print("\nExtracting val mask") extract_images( cfg, val_mask_names, val_mask_path, scan_type="mask" ) if __name__ == '__main__': preprocess_lits_data()
PyTorch/Classification/ConvNets/image_classification/models
models
entrypoints
# Copyright (c) 2018-2019, NVIDIA CORPORATION # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. def nvidia_efficientnet(type='efficient-b0', pretrained=True, **kwargs): """Constructs a EfficientNet model. For detailed information on model input and output, training recipies, inference and performance visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com Args: pretrained (bool, True): If True, returns a model pretrained on IMAGENET dataset. 
""" from .efficientnet import _ce return _ce(type)(pretrained=pretrained, **kwargs) def nvidia_convnets_processing_utils(): import numpy as np import torch from PIL import Image import torchvision.transforms as transforms import numpy as np import json import requests import validators class Processing: @staticmethod def prepare_input_from_uri(uri, cuda=False): img_transforms = transforms.Compose( [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()] ) if (validators.url(uri)): img = Image.open(requests.get(uri, stream=True).raw) else: img = Image.open(uri) img = img_transforms(img) with torch.no_grad(): # mean and std are not multiplied by 255 as they are in training script # torch dataloader reads data into bytes whereas loading directly # through PIL creates a tensor with floats in [0,1] range mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1) img = img.float() if cuda: mean = mean.cuda() std = std.cuda() img = img.cuda() input = img.unsqueeze(0).sub_(mean).div_(std) return input @staticmethod def pick_n_best(predictions, n=5): predictions = predictions.float().cpu().numpy() topN = np.argsort(-1*predictions, axis=-1)[:,:n] imgnet_classes = Processing.get_imgnet_classes() results=[] for idx,case in enumerate(topN): r = [] for c, v in zip(imgnet_classes[case], predictions[idx, case]): r.append((f"{c}", f"{100*v:.1f}%")) print(f"sample {idx}: {r}") results.append(r) return results @staticmethod def get_imgnet_classes(): import os import json imgnet_classes_json = "LOC_synset_mapping.json" if not os.path.exists(imgnet_classes_json): print("Downloading Imagenet Classes names.") import urllib urllib.request.urlretrieve( "https://raw.githubusercontent.com/NVIDIA/DeepLearningExamples/master/PyTorch/Classification/ConvNets/LOC_synset_mapping.json", filename=imgnet_classes_json) print("Downloading finished.") imgnet_classes = np.array(json.load(open(imgnet_classes_json, "r"))) return imgnet_classes return Processing()
CUDA-Optimized/FastSpeech/fastspeech
fastspeech
audio
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np def dynamic_range_compression(x, C=1, clip_val=1e-5): """ PARAMS ------ C: compression factor """ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) def dynamic_range_decompression(x, C=1): """ PARAMS ------ C: compression factor used to compress """ return np.exp(x) / C
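Editor's note: a quick numerical check of the two helpers above. Decompression undoes compression exactly as long as the values stay above clip_val; the sample array is arbitrary and the import path is assumed from the listing (fastspeech/audio).

import numpy as np
from fastspeech.audio import dynamic_range_compression, dynamic_range_decompression

x = np.array([1e-3, 0.5, 2.0, 10.0])
compressed = dynamic_range_compression(x, C=1)      # log-compressed values
recovered = dynamic_range_decompression(compressed, C=1)
assert np.allclose(recovered, x)                    # exact inverse above the clip threshold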
PyTorch/DrugDiscovery/MoFlow/moflow/model
model
glow
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 Chengxi Zang # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from typing import Tuple import torch import torch.nn as nn from moflow.model.basic import ActNorm, InvConv2dLU, InvConv2d from moflow.model.coupling import AffineCoupling, GraphAffineCoupling class Flow(nn.Module): def __init__(self, in_channel, hidden_channels, conv_lu=2, mask_swap=False): super(Flow, self).__init__() # More stable to support more flows self.actnorm = ActNorm(num_channels=in_channel, num_dims=4) if conv_lu == 0: self.invconv = InvConv2d(in_channel) elif conv_lu == 1: self.invconv = InvConv2dLU(in_channel) elif conv_lu == 2: self.invconv = None else: raise ValueError("conv_lu in {0,1,2}, 0:InvConv2d, 1:InvConv2dLU, 2:none-just swap to update in coupling") self.coupling = AffineCoupling(in_channel, hidden_channels, mask_swap=mask_swap) def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: out, logdet = self.actnorm(input) if self.invconv is not None: out, det1 = self.invconv(out) else: det1 = 0 out, det2 = self.coupling(out) logdet = logdet + det1 if det2 is not None: logdet = logdet + det2 return out, logdet @torch.jit.export def reverse(self, output: torch.Tensor) -> torch.Tensor: input = self.coupling.reverse(output) if self.invconv is not None: input = self.invconv.reverse(input) input = self.actnorm.reverse(input) return input class FlowOnGraph(nn.Module): def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row): super(FlowOnGraph, self).__init__() self.n_node = n_node self.in_dim = in_dim self.hidden_dim_dict = hidden_dim_dict self.masked_row = masked_row self.actnorm = ActNorm(num_channels=n_node, num_dims=3) self.coupling = GraphAffineCoupling(n_node, in_dim, hidden_dim_dict, masked_row) def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: adj, input = graph out, logdet = self.actnorm(input) det1 = 0 out, det2 = self.coupling((adj, 
out)) logdet = logdet + det1 if det2 is not None: logdet = logdet + det2 return out, logdet @torch.jit.export def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: adj, output = graph input = self.coupling.reverse((adj, output)) input = self.actnorm.reverse(input) return input class Block(nn.Module): def __init__(self, in_channel, n_flow, squeeze_fold, hidden_channels, conv_lu=2): super(Block, self).__init__() self.squeeze_fold = squeeze_fold squeeze_dim = in_channel * self.squeeze_fold * self.squeeze_fold self.flows = nn.ModuleList() for i in range(n_flow): if conv_lu in (0, 1): self.flows.append(Flow(squeeze_dim, hidden_channels, conv_lu=conv_lu, mask_swap=False)) else: self.flows.append(Flow(squeeze_dim, hidden_channels, conv_lu=2, mask_swap=bool(i % 2))) def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: out = self._squeeze(input) logdet = 0 for flow in self.flows: out, det = flow(out) logdet = logdet + det out = self._unsqueeze(out) return out, logdet @torch.jit.export def reverse(self, output: torch.Tensor) -> torch.Tensor: input = self._squeeze(output) for flow in self.flows[::-1]: input = flow.reverse(input) unsqueezed = self._unsqueeze(input) return unsqueezed def _squeeze(self, x: torch.Tensor) -> torch.Tensor: """Trade spatial extent for channels. In forward direction, convert each 1x4x4 volume of input into a 4x1x1 volume of output. Args: x (torch.Tensor): Input to squeeze or unsqueeze. reverse (bool): Reverse the operation, i.e., unsqueeze. Returns: x (torch.Tensor): Squeezed or unsqueezed tensor. """ assert len(x.shape) == 4 b_size, n_channel, height, width = x.shape fold = self.squeeze_fold squeezed = x.view(b_size, n_channel, height // fold, fold, width // fold, fold) squeezed = squeezed.permute(0, 1, 3, 5, 2, 4).contiguous() out = squeezed.view(b_size, n_channel * fold * fold, height // fold, width // fold) return out def _unsqueeze(self, x: torch.Tensor) -> torch.Tensor: assert len(x.shape) == 4 b_size, n_channel, height, width = x.shape fold = self.squeeze_fold unsqueezed = x.view(b_size, n_channel // (fold * fold), fold, fold, height, width) unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3).contiguous() out = unsqueezed.view(b_size, n_channel // (fold * fold), height * fold, width * fold) return out class BlockOnGraph(nn.Module): def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size=1, mask_row_stride=1): super(BlockOnGraph, self).__init__() assert 0 < mask_row_size < n_node self.flows = nn.ModuleList() for i in range(n_flow): start = i * mask_row_stride masked_row =[r % n_node for r in range(start, start+mask_row_size)] self.flows.append(FlowOnGraph(n_node, in_dim, hidden_dim_dict, masked_row=masked_row)) def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: adj, input = graph out = input logdet = 0 for flow in self.flows: out, det = flow((adj, out)) logdet = logdet + det return out, logdet @torch.jit.export def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: adj, output = graph input = output for flow in self.flows[::-1]: input = flow.reverse((adj, input)) return input class Glow(nn.Module): def __init__(self, in_channel, n_flow, n_block, squeeze_fold, hidden_channel, conv_lu=2): super(Glow, self).__init__() self.blocks = nn.ModuleList() n_channel = in_channel for i in range(n_block): self.blocks.append(Block(n_channel, n_flow, squeeze_fold, hidden_channel, conv_lu=conv_lu)) def forward(self, input: torch.Tensor) -> 
Tuple[torch.Tensor, torch.Tensor]: logdet = 0 out = input for block in self.blocks: out, det = block(out) logdet = logdet + det return out, logdet @torch.jit.export def reverse(self, z: torch.Tensor) -> torch.Tensor: h = z for i, block in enumerate(self.blocks[::-1]): h = block.reverse(h) return h class GlowOnGraph(nn.Module): def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, n_block, mask_row_size_list=(2,), mask_row_stride_list=(1,)): super(GlowOnGraph, self).__init__() assert len(mask_row_size_list) == n_block or len(mask_row_size_list) == 1 assert len(mask_row_stride_list) == n_block or len(mask_row_stride_list) == 1 if len(mask_row_size_list) == 1: mask_row_size_list = mask_row_size_list * n_block if len(mask_row_stride_list) == 1: mask_row_stride_list = mask_row_stride_list * n_block self.blocks = nn.ModuleList() for i in range(n_block): mask_row_size = mask_row_size_list[i] mask_row_stride = mask_row_stride_list[i] self.blocks.append(BlockOnGraph(n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size, mask_row_stride)) def forward(self, adj: torch.Tensor, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: logdet = 0 out = x for block in self.blocks: out, det = block((adj, out)) logdet = logdet + det return out, logdet @torch.jit.export def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: adj, z = graph input = z for i, block in enumerate(self.blocks[::-1]): input = block.reverse((adj, input)) return input
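Editor's note: Block._squeeze and Block._unsqueeze above trade spatial extent for channels. A standalone sketch in plain PyTorch that reproduces the same view/permute steps with fold=2 (rather than instantiating the module) and checks that the two operations are exact inverses:

import torch

def squeeze(x, fold=2):
    # Same reshaping as Block._squeeze above.
    b, c, h, w = x.shape
    x = x.view(b, c, h // fold, fold, w // fold, fold)
    x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
    return x.view(b, c * fold * fold, h // fold, w // fold)

def unsqueeze(x, fold=2):
    # Same reshaping as Block._unsqueeze above.
    b, c, h, w = x.shape
    x = x.view(b, c // (fold * fold), fold, fold, h, w)
    x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
    return x.view(b, c // (fold * fold), h * fold, w * fold)

x = torch.randn(3, 4, 8, 8)           # (batch, channels, height, width)
y = squeeze(x)                        # -> (3, 16, 4, 4): spatial extent moved into channels
assert torch.equal(unsqueeze(y), x)   # round-trip recovers the original tensor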
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime
runtime
gpu_affinity
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES # SPDX-License-Identifier: MIT import collections import itertools import os import pathlib import re import pynvml class Device: # assume nvml returns list of 64 bit ints _nvml_bit_affinity = 64 _nvml_affinity_elements = ( os.cpu_count() + _nvml_bit_affinity - 1 ) // _nvml_bit_affinity def __init__(self, device_idx): super().__init__() self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx) def get_name(self): return pynvml.nvmlDeviceGetName(self.handle) def get_uuid(self): return pynvml.nvmlDeviceGetUUID(self.handle) def get_cpu_affinity(self, scope): if scope == 'socket': nvml_scope = pynvml.NVML_AFFINITY_SCOPE_SOCKET elif scope == 'node': nvml_scope = pynvml.NVML_AFFINITY_SCOPE_NODE else: raise RuntimeError('Unknown scope') affinity_string = '' for j in pynvml.nvmlDeviceGetCpuAffinityWithinScope( self.handle, Device._nvml_affinity_elements, nvml_scope ): # assume nvml returns list of 64 bit ints affinity_string = '{:064b}'.format(j) + affinity_string affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is in 0th element of list ret = [i for i, e in enumerate(affinity_list) if e != 0] return ret def get_thread_siblings_list(): """ Returns a list of 2-element integer tuples representing pairs of hyperthreading cores. 
""" path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list' thread_siblings_list = [] pattern = re.compile(r'(\d+)\D(\d+)') for fname in pathlib.Path(path[0]).glob(path[1:]): with open(fname) as f: content = f.read().strip() res = pattern.findall(content) if res: pair = tuple(sorted(map(int, res[0]))) thread_siblings_list.append(pair) thread_siblings_list = list(set(thread_siblings_list)) return thread_siblings_list def build_thread_siblings_dict(siblings_list): siblings_dict = {} for siblings_tuple in siblings_list: for core in siblings_tuple: siblings_dict[core] = siblings_tuple return siblings_dict def group_list_by_key(the_list, key): sorted_list = sorted(the_list, key=key) grouped = [ tuple(group) for key, group in itertools.groupby(sorted_list, key=key) ] return grouped def ungroup_affinities(affinities, scope, cores, min_cores=1, max_cores=None): if scope == 'socket': affinities = [ list(itertools.chain(*zip(*affinity))) for affinity in affinities ] elif scope == 'node': affinities = [ [group[0] for group in affinity] for affinity in affinities ] for gpu_id, affinity in enumerate(affinities): if len(affinity) < min_cores: raise RuntimeError( f'Number of available physical cores for GPU {gpu_id} is less ' f'the predefinied minimum, min_cores={min_cores}, available ' f'physical cores: {affinity} (count={len(affinity)})' ) if max_cores is not None: affinities = [affinity[:max_cores] for affinity in affinities] if cores == 'all_logical': affinities = [ list(itertools.chain(*affinity)) for affinity in affinities ] elif cores == 'single_logical': affinities = [ [group[0] for group in affinity] for affinity in affinities ] else: raise RuntimeError('Unknown cores mode') return affinities def check_affinities(affinities): # sets of cores should be either identical or disjoint for i, j in itertools.product(affinities, affinities): if not set(i) == set(j) and not set(i).isdisjoint(set(j)): raise RuntimeError( f'Sets of cores should be either identical or disjoint, ' f'but got {i} and {j}.' 
) def get_affinities(nproc_per_node, scope, exclude_unavailable_cores=True): devices = [Device(i) for i in range(nproc_per_node)] affinities = [dev.get_cpu_affinity(scope) for dev in devices] if exclude_unavailable_cores: available_cores = os.sched_getaffinity(0) affinities = [ sorted(list(set(affinity) & available_cores)) for affinity in affinities ] check_affinities(affinities) return affinities def get_grouped_affinities(nproc_per_node, exclude_unavailable_cores=True): siblings_list = get_thread_siblings_list() siblings_dict = build_thread_siblings_dict(siblings_list) socket_affinities = get_affinities( nproc_per_node, 'socket', exclude_unavailable_cores ) node_affinities = get_affinities( nproc_per_node, 'node', exclude_unavailable_cores ) siblings_key = lambda x: siblings_dict.get(x, (x,)) sibling_node_affinities = [ tuple(group_list_by_key(affinity, key=siblings_key)) for affinity in node_affinities ] sibling_socket_affinities = [ tuple(group_list_by_key(affinity, key=siblings_key)) for affinity in socket_affinities ] socket_node_assigned_cores = collections.defaultdict(list) for socket, node_cores in zip( sibling_socket_affinities, sibling_node_affinities ): socket_node_assigned_cores[socket].extend(node_cores) socket_node_assigned_cores = { key: tuple(sorted(set(value))) for key, value in socket_node_assigned_cores.items() } node_grouping = collections.defaultdict(list) for socket_cores, assigned_cores in socket_node_assigned_cores.items(): unassigned_cores = sorted( list(set(socket_cores) - set(assigned_cores)) ) for assigned_core in assigned_cores: node_grouping[assigned_core].append(assigned_core) for assigned, unassigned in zip( itertools.cycle(assigned_cores), unassigned_cores ): node_grouping[assigned].append(unassigned) node_grouping = {key: tuple(value) for key, value in node_grouping.items()} grouped_affinities = [ tuple(node_grouping[item] for item in sibling_node_affinity) for sibling_node_affinity in sibling_node_affinities ] return grouped_affinities def set_all(gpu_id, nproc_per_node, scope, cores, min_cores, max_cores): """ The process is assigned with all available physical CPU cores recommended by pynvml for the GPU with a given id. Assignment automatically includes available hyperthreading siblings if cores='all_logical'. Args: gpu_id: index of a GPU nproc_per_node: number of processes per node scope: scope for retrieving affinity from pynvml, 'node' or 'socket' cores: 'all_logical' or 'single_logical' """ grouped_affinities = get_grouped_affinities(nproc_per_node) ungrouped_affinities = ungroup_affinities( grouped_affinities, scope, cores, min_cores, max_cores ) os.sched_setaffinity(0, ungrouped_affinities[gpu_id]) def set_single(gpu_id, nproc_per_node, scope, cores, min_cores=1, max_cores=1): """ The process is assigned with the first available physical CPU core from the list of all physical CPU cores recommended by pynvml for the GPU with a given id. Assignment automatically includes available hyperthreading siblings if cores='all_logical'. 
Args: gpu_id: index of a GPU nproc_per_node: number of processes per node scope: scope for retrieving affinity from pynvml, 'node' or 'socket' cores: 'all_logical' or 'single_logical' """ grouped_affinities = get_grouped_affinities(nproc_per_node) single_grouped_affinities = [group[:1] for group in grouped_affinities] ungrouped_affinities = ungroup_affinities( single_grouped_affinities, scope, cores, min_cores, max_cores ) os.sched_setaffinity(0, ungrouped_affinities[gpu_id]) def set_single_unique( gpu_id, nproc_per_node, scope, cores, min_cores=1, max_cores=1 ): """ The process is assigned with a single unique available physical CPU core from the list of all physical CPU cores recommended by pynvml for the GPU with a given id. Assignment automatically includes available hyperthreading siblings if cores='all_logical'. Args: gpu_id: index of a GPU nproc_per_node: number of processes per node scope: scope for retrieving affinity from pynvml, 'node' or 'socket' cores: 'all_logical' or 'single_logical' """ grouped_affinities = get_grouped_affinities(nproc_per_node) affinities = [] assigned_groups = set() for grouped_affinity in grouped_affinities: for group in grouped_affinity: if group not in assigned_groups: affinities.append([group]) assigned_groups.add(group) break ungrouped_affinities = ungroup_affinities( affinities, scope, cores, min_cores, max_cores ) os.sched_setaffinity(0, ungrouped_affinities[gpu_id]) def set_unique( gpu_id, nproc_per_node, scope, cores, mode, min_cores, max_cores, balanced=True, ): """ The process is assigned with a unique subset of available physical CPU cores from the list of all CPU cores recommended by pynvml for the GPU with a given id. Assignment automatically includes available hyperthreading siblings if cores='all_logical'. Args: gpu_id: index of a GPU nproc_per_node: number of processes per node scope: scope for retrieving affinity from pynvml, 'node' or 'socket' cores: 'all_logical' or 'single_logical' mode: 'unique_contiguous' or 'unique_interleaved' balanced: assign an equal number of physical cores to each process, """ grouped_affinities = get_grouped_affinities(nproc_per_node) grouped_affinities_to_device_ids = collections.defaultdict(list) for idx, grouped_affinity in enumerate(grouped_affinities): grouped_affinities_to_device_ids[tuple(grouped_affinity)].append(idx) # compute minimal number of physical cores per GPU across all GPUs and # sockets, code assigns this number of cores per GPU if balanced == True min_physical_cores_per_gpu = min( [ len(cores) // len(gpus) for cores, gpus in grouped_affinities_to_device_ids.items() ] ) grouped_unique_affinities = [None] * nproc_per_node for ( grouped_affinity, device_ids, ) in grouped_affinities_to_device_ids.items(): devices_per_group = len(device_ids) if balanced: cores_per_device = min_physical_cores_per_gpu grouped_affinity = grouped_affinity[ : devices_per_group * min_physical_cores_per_gpu ] else: cores_per_device = len(grouped_affinity) // devices_per_group for subgroup_id, device_id in enumerate(device_ids): # In theory there should be no difference in performance between # 'interleaved' and 'contiguous' pattern on Intel-based DGX-1, # but 'contiguous' should be better for DGX A100 because on AMD # Rome 4 consecutive cores are sharing L3 cache. # TODO: code doesn't attempt to automatically detect layout of # L3 cache, also external environment may already exclude some # cores, this code makes no attempt to detect it and to align # mapping to multiples of 4. 
if mode == 'unique_interleaved': unique_grouped_affinity = list( grouped_affinity[subgroup_id::devices_per_group] ) elif mode == 'unique_contiguous': unique_grouped_affinity = list( grouped_affinity[ subgroup_id * cores_per_device : (subgroup_id + 1) * cores_per_device ] ) else: raise RuntimeError('Unknown set_unique mode') grouped_unique_affinities[device_id] = unique_grouped_affinity ungrouped_affinities = ungroup_affinities( grouped_unique_affinities, scope, cores, min_cores, max_cores ) os.sched_setaffinity(0, ungrouped_affinities[gpu_id]) def set_affinity( gpu_id, nproc_per_node, *, mode='unique_contiguous', scope='node', cores='all_logical', balanced=True, min_cores=1, max_cores=None, ): """ The process is assigned with a proper CPU affinity that matches CPU-GPU hardware architecture on a given platform. Usually, setting proper affinity improves and stabilizes the performance of deep learning training workloads. This function assumes that the workload runs in multi-process single-device mode (there are multiple training processes, and each process is running on a single GPU). This is typical for multi-GPU data-parallel training workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`). Available affinity modes: * 'all' - the process is assigned with all available physical CPU cores recommended by pynvml for the GPU with a given id. * 'single' - the process is assigned with the first available physical CPU core from the list of all physical CPU cores recommended by pynvml for the GPU with a given id (multiple GPUs could be assigned with the same CPU core). * 'single_unique' - the process is assigned with a single unique available physical CPU core from the list of all CPU cores recommended by pynvml for the GPU with a given id. * 'unique_interleaved' - the process is assigned with a unique subset of available physical CPU cores from the list of all physical CPU cores recommended by pynvml for the GPU with a given id, cores are assigned with interleaved indexing pattern * 'unique_contiguous' - (the default mode) the process is assigned with a unique subset of available physical CPU cores from the list of all physical CPU cores recommended by pynvml for the GPU with a given id, cores are assigned with contiguous indexing pattern Available "scope" modes: * 'node' - sets the scope for pynvml affinity queries to NUMA node * 'socket' - sets the scope for pynvml affinity queries to processor socket Available "cores" modes: * 'all_logical' - assigns the process with all logical cores associated with a given corresponding physical core (i.e., automatically includes all available hyperthreading siblings) * 'single_logical' - assigns the process with only one logical core associated with a given corresponding physical core (i.e., excludes hyperthreading siblings) 'unique_contiguous' is the recommended mode for deep learning training workloads on NVIDIA DGX machines. 
Args: gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1 nproc_per_node: number of processes per node mode: affinity mode scope: scope for retrieving affinity from pynvml, 'node' or 'socket' cores: 'all_logical' or 'single_logical' balanced: assign an equal number of physical cores to each process, affects only 'unique_interleaved' and 'unique_contiguous' affinity modes min_cores: (default=1) the intended minimum number of physical cores per process, code raises RuntimeError if the number of available cores is less than 'min_cores' max_cores: (default=None) the intended maxmimum number of physical cores per process, the list of assigned cores is trimmed to the first 'max_cores' cores if max_cores is not None Returns a set of logical CPU cores on which the process is eligible to run. WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs. set_affinity with scope='node' restricts execution only to the CPU cores directly connected to GPUs. On DGX A100, it will limit the code to half of the CPU cores and half of CPU memory bandwidth (which may be fine for many DL models). Use scope='socket' to use all available DGX A100 CPU cores. WARNING: Intel's OpenMP implementation resets affinity on the first call to an OpenMP function after a fork. It's recommended to run with env variable: `KMP_AFFINITY=disabled` if the affinity set by gpu_affinity should be preserved after a fork (e.g. in PyTorch DataLoader workers). Example: import argparse import os import gpu_affinity import torch def main(): parser = argparse.ArgumentParser() parser.add_argument( '--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0), ) args = parser.parse_args() nproc_per_node = torch.cuda.device_count() affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node) print(f'{args.local_rank}: core affinity: {affinity}') if __name__ == "__main__": main() Launch the example with: python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py """ pynvml.nvmlInit() if mode == 'all': set_all(gpu_id, nproc_per_node, scope, cores, min_cores, max_cores) elif mode == 'single': set_single(gpu_id, nproc_per_node, scope, cores) elif mode == 'single_unique': set_single_unique(gpu_id, nproc_per_node, scope, cores) elif mode == 'unique_interleaved' or mode == 'unique_contiguous': set_unique( gpu_id, nproc_per_node, scope, cores, mode, min_cores, max_cores, balanced, ) else: raise RuntimeError('Unknown affinity mode') affinity = os.sched_getaffinity(0) return affinity
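Editor's note: the grouping helpers above pair each physical core with its hyperthreading sibling before affinities are assigned. A small self-contained illustration of build_thread_siblings_dict and group_list_by_key (the helpers are copied verbatim so the snippet runs on its own, and the core topology is hypothetical instead of being read from /sys):

import itertools

def build_thread_siblings_dict(siblings_list):
    # same helper as above: map every core to its (physical, sibling) tuple
    siblings_dict = {}
    for siblings_tuple in siblings_list:
        for core in siblings_tuple:
            siblings_dict[core] = siblings_tuple
    return siblings_dict

def group_list_by_key(the_list, key):
    # same helper as above: group logical cores that share a sibling tuple
    sorted_list = sorted(the_list, key=key)
    return [tuple(group) for _, group in itertools.groupby(sorted_list, key=key)]

# Hypothetical topology: cores 0-3 are physical, 4-7 are their hyperthread siblings.
siblings_list = [(0, 4), (1, 5), (2, 6), (3, 7)]
siblings_dict = build_thread_siblings_dict(siblings_list)

# Affinity reported for one GPU, as a flat list of logical cores.
affinity = [0, 1, 4, 5]
grouped = group_list_by_key(affinity, key=lambda x: siblings_dict.get(x, (x,)))
print(grouped)  # [(0, 4), (1, 5)] -> two physical cores, each with its sibling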
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs
feature_specs
different_paths
channel_spec: categorical: - cat_0.bin - cat_1.bin - cat_2.bin - cat_3.bin - cat_4.bin - cat_5.bin - cat_6.bin - cat_7.bin - cat_8.bin - cat_9.bin - cat_10.bin - cat_11.bin - cat_12.bin - cat_13.bin - cat_14.bin - cat_15.bin - cat_16.bin - cat_17.bin - cat_18.bin - cat_19.bin - cat_20.bin - cat_21.bin - cat_22.bin - cat_23.bin - cat_24.bin - cat_25.bin label: - label numerical: &id001 - num_0 - num_1 - num_2 - num_3 - num_4 - num_5 - num_6 - num_7 - num_8 - num_9 - num_10 - num_11 - num_12 feature_spec: cat_0.bin: cardinality: 100000 dtype: int32 cat_1.bin: cardinality: 100000 dtype: int32 cat_10.bin: cardinality: 100000 dtype: int32 cat_11.bin: cardinality: 100000 dtype: int32 cat_12.bin: cardinality: 100000 dtype: int32 cat_13.bin: cardinality: 100000 dtype: int32 cat_14.bin: cardinality: 100000 dtype: int32 cat_15.bin: cardinality: 100000 dtype: int32 cat_16.bin: cardinality: 100000 dtype: int32 cat_17.bin: cardinality: 100000 dtype: int32 cat_18.bin: cardinality: 100000 dtype: int32 cat_19.bin: cardinality: 100000 dtype: int32 cat_2.bin: cardinality: 100000 dtype: int32 cat_20.bin: cardinality: 100000 dtype: int32 cat_21.bin: cardinality: 100000 dtype: int32 cat_22.bin: cardinality: 100000 dtype: int32 cat_23.bin: cardinality: 100000 dtype: int32 cat_24.bin: cardinality: 100000 dtype: int32 cat_25.bin: cardinality: 100000 dtype: int32 cat_3.bin: cardinality: 100000 dtype: int32 cat_4.bin: cardinality: 100000 dtype: int32 cat_5.bin: cardinality: 100000 dtype: int32 cat_6.bin: cardinality: 100000 dtype: int32 cat_7.bin: cardinality: 100000 dtype: int32 cat_8.bin: cardinality: 100000 dtype: int32 cat_9.bin: cardinality: 100000 dtype: int32 label: dtype: bool num_0: dtype: float16 num_1: dtype: float16 num_10: dtype: float16 num_11: dtype: float16 num_12: dtype: float16 num_2: dtype: float16 num_3: dtype: float16 num_4: dtype: float16 num_5: dtype: float16 num_6: dtype: float16 num_7: dtype: float16 num_8: dtype: float16 num_9: dtype: float16 metadata: {} source_spec: test: - features: *id001 files: - differenttnum.notbin type: split_binary - features: - label files: - different2.foo type: split_binary - features: - cat_0.bin files: - tec341234.b53in type: split_binary - features: - cat_1.bin files: - te2341st/cat_6734567361.b563465345in type: split_binary - features: - cat_2.bin files: - test/341234cat_2.bin type: split_binary - features: - cat_3.bin files: - testsadfas/cat_sdfa3.bin type: split_binary - features: - cat_4.bin files: - tessdft/cat_4.bfasdin type: split_binary - features: - cat_5.bin files: - tesdfst/sadfasfcat_5.bin type: split_binary - features: - cat_6.bin files: - tessdfat/cat_6sdf.bin type: split_binary - features: - cat_7.bin files: - tedfasdfst/cat_sdf7.bin type: split_binary - features: - cat_8.bin files: - tessadfasfdt/cat_8.bidfasfdn type: split_binary - features: - cat_9.bin files: - tessdfasst/cadfat_9.bin type: split_binary - features: - cat_10.bin files: - testertwett/cat_10twertwe.bin type: split_binary - features: - cat_11.bin files: - tesertwertt/cat_1ertw1.bin type: split_binary - features: - cat_12.bin files: - teserqwert/cat_12.bitreten type: split_binary - features: - cat_13.bin files: - teetrtwest/cat_13.biywywevgywn type: split_binary - features: - cat_14.bin files: - t5nw5g5w45est/cafw45t_14.bin type: split_binary - features: - cat_15.bin files: - test456f/cat_146w45.bin type: split_binary - features: - cat_16.bin files: - test54wehy/cat_1rhyte46.bin type: split_binary - features: - cat_17.bin files: - testrujyt/cat_174w5.bin type: split_binary - 
features: - cat_18.bin files: - teste45hy/cat_18.be45hyin type: split_binary - features: - cat_19.bin files: - testrtggsertecat_19.bin type: split_binary - features: - cat_20.bin files: - testuteyutyut/cat_20.bin type: split_binary - features: - cat_21.bin files: - testyuteyutryt/cat_21.bin type: split_binary - features: - cat_22.bin files: - tesyuirty7iut/cat_22.bin type: split_binary - features: - cat_23.bin files: - tesi6r7it/cat_23.bin type: split_binary - features: - cat_24.bin files: - tesutyhergt/cat_24.bin type: split_binary - features: - cat_25.bin files: - testir756irt/cat_25.bin type: split_binary train: - features: *id001 files: - train/rtyerytenumerical.bin type: split_binary - features: - label files: - traiherthern/label.bin type: split_binary - features: - cat_0.bin files: - trairtyertyern/cat_0.bin type: split_binary - features: - cat_1.bin files: - trainrtyerty/cat_1.bin type: split_binary - features: - cat_2.bin files: - trainyrtyeyjdfg/cat_2.bin type: split_binary - features: - cat_3.bin files: - trainghdfyjert/cat_3.bin type: split_binary - features: - cat_4.bin files: - train/gyutyhcat_4.bin type: split_binary - features: - cat_5.bin files: - train/rethercat_5.bin type: split_binary - features: - cat_6.bin files: - traertyeryin/cat_6.bin type: split_binary - features: - cat_7.bin files: - trartyertyin/cat_7.bin type: split_binary - features: - cat_8.bin files: - train/cayretyertyt_8.bin type: split_binary - features: - cat_9.bin files: - train/cat_hfdhfhgd.bin type: split_binary - features: - cat_10.bin files: - traintryerdtyin type: split_binary - features: - cat_11.bin files: - trah56jh7w45gein type: split_binary - features: - cat_12.bin files: - train67j56e5g.bin type: split_binary - features: - cat_13.bin files: - tr456tyw4f5in type: split_binary - features: - cat_14.bin files: - tra56egin type: split_binary - features: - cat_15.bin files: - train56hrtdf5.bin type: split_binary - features: - cat_16.bin files: - traie56utgfhbbin type: split_binary - features: - cat_17.bin files: - tra56evtdygvbin type: split_binary - features: - cat_18.bin files: - tra566dtyghbi.bin type: split_binary - features: - cat_19.bin files: - trai9uijk.bin type: split_binary - features: - cat_20.bin files: - traiywe5tin type: split_binary - features: - cat_21.bin files: - trairtyhgfbin type: split_binary - features: - cat_22.bin files: - trainrfghbin type: split_binary - features: - cat_23.bin files: - trairtgfin type: split_binary - features: - cat_24.bin files: - trairtfgin type: split_binary - features: - cat_25.bin files: - traii76tyn type: split_binary
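Editor's note: this is a test fixture (note the deliberately varied file paths), and the spec relies on a YAML anchor (&id001 / *id001) so the numerical feature list is written once and reused by every source chunk. A brief sketch of how the resolved structure looks after loading with standard PyYAML; 'feature_spec.yaml' is a placeholder filename.

import yaml

with open("feature_spec.yaml") as f:          # placeholder path for the spec above
    spec = yaml.safe_load(f)

# The *id001 alias expands to the full numerical feature list on load.
numerical = spec["channel_spec"]["numerical"]
assert spec["source_spec"]["train"][0]["features"] == numerical

# Every categorical feature declares a cardinality and dtype.
for name in spec["channel_spec"]["categorical"]:
    assert spec["feature_spec"][name]["dtype"] == "int32"
    assert spec["feature_spec"][name]["cardinality"] > 0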
PyTorch/SpeechSynthesis/HiFiGAN/common/text
text
symbols
""" from https://github.com/keithito/tacotron """ ''' Defines the set of symbols used in text input to the model. The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' from .cmudict import valid_symbols # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): _arpabet = ['@' + s for s in valid_symbols] def get_symbols(symbol_set='english_basic'): if symbol_set == 'english_basic': _pad = '_' _punctuation = '!\'(),.:;? ' _special = '-' _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' symbols = list(_pad + _special + _punctuation + _letters) + _arpabet elif symbol_set == 'english_basic_lowercase': _pad = '_' _punctuation = '!\'"(),.:;? ' _special = '-' _letters = 'abcdefghijklmnopqrstuvwxyz' symbols = list(_pad + _special + _punctuation + _letters) + _arpabet elif symbol_set == 'english_expanded': _punctuation = '!\'",.:;? ' _math = '#%&*+-/[]()' _special = '_@©°½—₩€$' _accented = 'áçéêëñöøćž' _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' symbols = list(_punctuation + _math + _special + _accented + _letters) + _arpabet else: raise Exception("{} symbol set does not exist".format(symbol_set)) return symbols def get_pad_idx(symbol_set='english_basic'): if symbol_set in {'english_basic', 'english_basic_lowercase'}: return 0 else: raise Exception("{} symbol set not used yet".format(symbol_set))
PyTorch/SpeechSynthesis/Tacotron2/platform
platform
DGXA100_tacotron2_AMP_1NGPU_train
mkdir -p output
python train.py -m Tacotron2 -o output/ --amp -lr 1e-3 --epochs 1501 -bs 128 --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --log-file nvlog.json --anneal-steps 500 1000 1500 --anneal-factor 0.3
TensorFlow2/Detection/Efficientdet/object_detection
object_detection
preprocessor
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Preprocess images and bounding boxes for detection. We perform two sets of operations in preprocessing stage: (a) operations that are applied to both training and testing data, (b) operations that are applied only to training data for the purpose of data augmentation. A preprocessing function receives a set of inputs, e.g. an image and bounding boxes, performs an operation on them, and returns them. Some examples are: randomly cropping the image, randomly mirroring the image, randomly changing the brightness, contrast, hue and randomly jittering the bounding boxes. The image is a rank 4 tensor: [1, height, width, channels] with dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where in each row there is a box with [ymin xmin ymax xmax]. Boxes are in normalized coordinates meaning their coordinate values range in [0, 1] Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing functions receive a rank 3 tensor for processing the image. Thus, inside the preprocess function we squeeze the image to become a rank 3 tensor and then we pass it to the functions. At the end of the preprocess we expand the image back to rank 4. """ import tensorflow.compat.v1 as tf from object_detection import box_list def _flip_boxes_left_right(boxes): """Left-right flip the boxes. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Flipped boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) flipped_xmin = tf.subtract(1.0, xmax) flipped_xmax = tf.subtract(1.0, xmin) flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1) return flipped_boxes def _flip_masks_left_right(masks): """Left-right flip masks. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: flipped masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ return masks[:, :, ::-1] def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation, scope=None): """Flips the keypoints horizontally around the flip_point. This operation flips the x coordinate for each keypoint around the flip_point and also permutes the keypoints in a manner specified by flip_permutation. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] flip_point: (float) scalar tensor representing the x coordinate to flip the keypoints around. flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the flipped keypoint indices. This is used primarily for keypoints that are not reflection invariant. E.g. 
Suppose there are 3 keypoints representing ['head', 'right_eye', 'left_eye'], then a logical choice for flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' and 'right_eye' after a horizontal flip. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'FlipHorizontal'): keypoints = tf.transpose(keypoints, [1, 0, 2]) keypoints = tf.gather(keypoints, flip_permutation) v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) u = flip_point * 2.0 - u new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def random_horizontal_flip(image, boxes=None, masks=None, keypoints=None, keypoint_flip_permutation=None, seed=None): """Randomly flips the image and detections horizontally. The probability of flipping the image is 50%. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. seed: random seed Returns: image: image which is the same shape as input image. If boxes, masks, keypoints, and keypoint_flip_permutation are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: if keypoints are provided but keypoint_flip_permutation is not. 
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_left_right(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def _compute_new_static_size(image, min_dimension, max_dimension): """Compute new static shape for resize_to_range method.""" image_shape = image.get_shape().as_list() orig_height = image_shape[0] orig_width = image_shape[1] num_channels = image_shape[2] orig_min_dim = min(orig_height, orig_width) # Calculates the larger of the possible sizes large_scale_factor = min_dimension / float(orig_min_dim) # Scaling orig_(height|width) by large_scale_factor will make the smaller # dimension equal to min_dimension, save for floating point rounding errors. # For reasonably-sized images, taking the nearest integer will reliably # eliminate this error. large_height = int(round(orig_height * large_scale_factor)) large_width = int(round(orig_width * large_scale_factor)) large_size = [large_height, large_width] if max_dimension: # Calculates the smaller of the possible sizes, use that if the larger # is too big. orig_max_dim = max(orig_height, orig_width) small_scale_factor = max_dimension / float(orig_max_dim) # Scaling orig_(height|width) by small_scale_factor will make the larger # dimension equal to max_dimension, save for floating point rounding # errors. For reasonably-sized images, taking the nearest integer will # reliably eliminate this error. small_height = int(round(orig_height * small_scale_factor)) small_width = int(round(orig_width * small_scale_factor)) small_size = [small_height, small_width] new_size = large_size if max(large_size) > max_dimension: new_size = small_size else: new_size = large_size return tf.constant(new_size + [num_channels]) def _compute_new_dynamic_size(image, min_dimension, max_dimension): """Compute new dynamic shape for resize_to_range method.""" image_shape = tf.shape(image) orig_height = tf.to_float(image_shape[0]) orig_width = tf.to_float(image_shape[1]) num_channels = image_shape[2] orig_min_dim = tf.minimum(orig_height, orig_width) # Calculates the larger of the possible sizes min_dimension = tf.constant(min_dimension, dtype=tf.float32) large_scale_factor = min_dimension / orig_min_dim # Scaling orig_(height|width) by large_scale_factor will make the smaller # dimension equal to min_dimension, save for floating point rounding errors. # For reasonably-sized images, taking the nearest integer will reliably # eliminate this error. 
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor)) large_width = tf.to_int32(tf.round(orig_width * large_scale_factor)) large_size = tf.stack([large_height, large_width]) if max_dimension: # Calculates the smaller of the possible sizes, use that if the larger # is too big. orig_max_dim = tf.maximum(orig_height, orig_width) max_dimension = tf.constant(max_dimension, dtype=tf.float32) small_scale_factor = max_dimension / orig_max_dim # Scaling orig_(height|width) by small_scale_factor will make the larger # dimension equal to max_dimension, save for floating point rounding # errors. For reasonably-sized images, taking the nearest integer will # reliably eliminate this error. small_height = tf.to_int32(tf.round(orig_height * small_scale_factor)) small_width = tf.to_int32(tf.round(orig_width * small_scale_factor)) small_size = tf.stack([small_height, small_width]) new_size = tf.cond( tf.to_float(tf.reduce_max(large_size)) > max_dimension, lambda: small_size, lambda: large_size) else: new_size = large_size return tf.stack(tf.unstack(new_size) + [num_channels]) def resize_to_range(image, masks=None, min_dimension=None, max_dimension=None, method=tf.image.ResizeMethod.BILINEAR, align_corners=False, pad_to_max_dimension=False): """Resizes an image so its dimensions are within the provided value. The output size can be described by two cases: 1. If the image can be rescaled so its minimum dimension is equal to the provided value without the other dimension exceeding max_dimension, then do so. 2. Otherwise, resize so the largest dimension is equal to max_dimension. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. min_dimension: (optional) (scalar) desired size of the smaller image dimension. max_dimension: (optional) (scalar) maximum allowed size of the larger image dimension. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. pad_to_max_dimension: Whether to resize the image and pad it with zeros so the resulting image is of the spatial size [max_dimension, max_dimension]. If masks are included they are padded similarly. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, channels], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. 
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeToRange', values=[image, min_dimension]): if image.get_shape().is_fully_defined(): new_size = _compute_new_static_size(image, min_dimension, max_dimension) else: new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension) new_image = tf.image.resize_images( image, new_size[:-1], method=method, align_corners=align_corners) if pad_to_max_dimension: new_image = tf.image.pad_to_bounding_box( new_image, 0, 0, max_dimension, max_dimension) result = [new_image] if masks is not None: new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_images( new_masks, new_size[:-1], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=align_corners) new_masks = tf.squeeze(new_masks, 3) if pad_to_max_dimension: new_masks = tf.image.pad_to_bounding_box( new_masks, 0, 0, max_dimension, max_dimension) result.append(new_masks) result.append(new_size) return result def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. Args: boxlist_to_copy_to: BoxList to which extra fields are copied. boxlist_to_copy_from: BoxList from which fields are copied. Returns: boxlist_to_copy_to with extra fields. """ for field in boxlist_to_copy_from.get_extra_fields(): boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) return boxlist_to_copy_to def box_list_scale(boxlist, y_scale, x_scale, scope=None): """scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = box_list.BoxList( tf.concat([y_min, x_min, y_max, x_max], 1)) return _copy_extra_fields(scaled_boxlist, boxlist) def keypoint_scale(keypoints, y_scale, x_scale, scope=None): """Scales keypoint coordinates in x and y dimensions. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) new_keypoints = keypoints * [[[y_scale, x_scale]]] return new_keypoints def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): """Scales boxes from normalized to pixel coordinates. Args: image: A 3D float32 tensor of shape [height, width, channels]. boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in normalized coordinates. Each row is of the form [ymin, xmin, ymax, xmax]. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. Returns: image: unchanged input image. scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in pixel coordinates. scaled_keypoints: a 3D float32 tensor with shape [num_instances, num_keypoints, 2] containing the keypoints in pixel coordinates. 
""" boxlist = box_list.BoxList(boxes) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] scaled_boxes = box_list_scale(boxlist, image_height, image_width).get() result = [image, scaled_boxes] if keypoints is not None: scaled_keypoints = keypoint_scale(keypoints, image_height, image_width) result.append(scaled_keypoints) return tuple(result)
TensorFlow2/Detection/Efficientdet
Efficientdet
requirements
absl-py>=0.7.1
matplotlib>=3.0.3
numpy>=1.16.4
Pillow>=6.0.0
PyYAML>=5.1
six>=1.12.0
pynvml==8.0.4
mpi4py>=3.0.3
tensorflow-addons>=0.13.0
git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI
TensorFlow/Translation/GNMT/scripts
scripts
verify_dataset
#!/bin/bash # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e DATASET_DIR=${1:-"data/wmt16_de_en"} ACTUAL_SRC_TRAIN=`cat ${DATASET_DIR}/train.tok.clean.bpe.32000.en |md5sum` EXPECTED_SRC_TRAIN='b7482095b787264a310d4933d197a134 -' if [[ $ACTUAL_SRC_TRAIN = $EXPECTED_SRC_TRAIN ]]; then echo "OK: correct ${DATASET_DIR}/train.tok.clean.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/train.tok.clean.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_TRAIN" echo "ERROR: found $ACTUAL_SRC_TRAIN" fi ACTUAL_TGT_TRAIN=`cat ${DATASET_DIR}/train.tok.clean.bpe.32000.de |md5sum` EXPECTED_TGT_TRAIN='409064aedaef5b7c458ff19a7beda462 -' if [[ $ACTUAL_TGT_TRAIN = $EXPECTED_TGT_TRAIN ]]; then echo "OK: correct ${DATASET_DIR}/train.tok.clean.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/train.tok.clean.bpe.32000.de" echo "ERROR: expected $EXPECTED_TGT_TRAIN" echo "ERROR: found $ACTUAL_TGT_TRAIN" fi ACTUAL_SRC_VAL=`cat ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en |md5sum` EXPECTED_SRC_VAL='704c4ba8c8b63df1f6678d32b91438b5 -' if [[ $ACTUAL_SRC_VAL = $EXPECTED_SRC_VAL ]]; then echo "OK: correct ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_VAL" echo "ERROR: found $ACTUAL_SRC_VAL" fi ACTUAL_TGT_VAL=`cat ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de |md5sum` EXPECTED_TGT_VAL='d27f5a64c839e20c5caa8b9d60075dde -' if [[ $ACTUAL_TGT_VAL = $EXPECTED_TGT_VAL ]]; then echo "OK: correct ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de" echo "ERROR: expected $EXPECTED_TGT_VAL" echo "ERROR: found $ACTUAL_TGT_VAL" fi ACTUAL_SRC_TEST=`cat ${DATASET_DIR}/newstest2014.tok.bpe.32000.en |md5sum` EXPECTED_SRC_TEST='cb014e2509f86cd81d5a87c240c07464 -' if [[ $ACTUAL_SRC_TEST = $EXPECTED_SRC_TEST ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.tok.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.tok.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_TEST" echo "ERROR: found $ACTUAL_SRC_TEST" fi ACTUAL_TGT_TEST=`cat ${DATASET_DIR}/newstest2014.tok.bpe.32000.de |md5sum` EXPECTED_TGT_TEST='d616740f6026dc493e66efdf9ac1cb04 -' if [[ $ACTUAL_TGT_TEST = $EXPECTED_TGT_TEST ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.tok.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.tok.bpe.32000.de" echo "ERROR: expected $EXPECTED_TGT_TEST" echo "ERROR: found $ACTUAL_TGT_TEST" fi ACTUAL_TGT_TEST_TARGET=`cat ${DATASET_DIR}/newstest2014.de |md5sum` EXPECTED_TGT_TEST_TARGET='f6c3818b477e4a25cad68b61cc883c17 -' if [[ $ACTUAL_TGT_TEST_TARGET = $EXPECTED_TGT_TEST_TARGET ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.de" echo "ERROR: expected $EXPECTED_TGT_TEST_TARGET" echo "ERROR: found $ACTUAL_TGT_TEST_TARGET" fi
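The same comparison can also be cross-checked outside of bash; this is only a hedged sketch in Python, the helper name md5_of is made up, and the expected digest is copied from the EXPECTED_SRC_TRAIN check above.

import hashlib

def md5_of(path, chunk_size=1 << 20):
    # Stream the file in chunks so large corpora never have to fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# print(md5_of('data/wmt16_de_en/train.tok.clean.bpe.32000.en')
#       == 'b7482095b787264a310d4933d197a134')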
PyTorch/Detection/Efficientdet/utils
utils
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc
voc
voc_eval
# A modification version from chainercv repository. # (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py) from __future__ import division import os from collections import defaultdict import numpy as np from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou def do_voc_evaluation(dataset, predictions, output_folder, logger): # TODO need to make the use_07_metric format available # for the user to choose pred_boxlists = [] gt_boxlists = [] for image_id, prediction in enumerate(predictions): img_info = dataset.get_img_info(image_id) if len(prediction) == 0: continue image_width = img_info["width"] image_height = img_info["height"] prediction = prediction.resize((image_width, image_height)) pred_boxlists.append(prediction) gt_boxlist = dataset.get_groundtruth(image_id) gt_boxlists.append(gt_boxlist) result = eval_detection_voc( pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=0.5, use_07_metric=True, ) result_str = "mAP: {:.4f}\n".format(result["map"]) for i, ap in enumerate(result["ap"]): if i == 0: # skip background continue result_str += "{:<16}: {:.4f}\n".format( dataset.map_class_id_to_class_name(i), ap ) logger.info(result_str) if output_folder: with open(os.path.join(output_folder, "result.txt"), "w") as fid: fid.write(result_str) return result def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False): """Evaluate on voc dataset. Args: pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields. gt_boxlists(list[BoxList]): ground truth boxlist, has labels field. iou_thresh: iou thresh use_07_metric: boolean Returns: dict represents the results """ assert len(gt_boxlists) == len( pred_boxlists ), "Length of gt and pred lists need to be same." prec, rec = calc_detection_voc_prec_rec( pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh ) ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric) return {"ap": ap, "map": np.nanmean(ap)} def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5): """Calculate precision and recall based on evaluation code of PASCAL VOC. This function calculates precision and recall of predicted bounding boxes obtained from a dataset which has :math:`N` images. The code is based on the evaluation code used in PASCAL VOC Challenge. """ n_pos = defaultdict(int) score = defaultdict(list) match = defaultdict(list) for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists): pred_bbox = pred_boxlist.bbox.numpy() pred_label = pred_boxlist.get_field("labels").numpy() pred_score = pred_boxlist.get_field("scores").numpy() gt_bbox = gt_boxlist.bbox.numpy() gt_label = gt_boxlist.get_field("labels").numpy() gt_difficult = gt_boxlist.get_field("difficult").numpy() for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)): pred_mask_l = pred_label == l pred_bbox_l = pred_bbox[pred_mask_l] pred_score_l = pred_score[pred_mask_l] # sort by score order = pred_score_l.argsort()[::-1] pred_bbox_l = pred_bbox_l[order] pred_score_l = pred_score_l[order] gt_mask_l = gt_label == l gt_bbox_l = gt_bbox[gt_mask_l] gt_difficult_l = gt_difficult[gt_mask_l] n_pos[l] += np.logical_not(gt_difficult_l).sum() score[l].extend(pred_score_l) if len(pred_bbox_l) == 0: continue if len(gt_bbox_l) == 0: match[l].extend((0,) * pred_bbox_l.shape[0]) continue # VOC evaluation follows integer typed bounding boxes. 
pred_bbox_l = pred_bbox_l.copy() pred_bbox_l[:, 2:] += 1 gt_bbox_l = gt_bbox_l.copy() gt_bbox_l[:, 2:] += 1 iou = boxlist_iou( BoxList(pred_bbox_l, gt_boxlist.size), BoxList(gt_bbox_l, gt_boxlist.size), ).numpy() gt_index = iou.argmax(axis=1) # set -1 if there is no matching ground truth gt_index[iou.max(axis=1) < iou_thresh] = -1 del iou selec = np.zeros(gt_bbox_l.shape[0], dtype=bool) for gt_idx in gt_index: if gt_idx >= 0: if gt_difficult_l[gt_idx]: match[l].append(-1) else: if not selec[gt_idx]: match[l].append(1) else: match[l].append(0) selec[gt_idx] = True else: match[l].append(0) n_fg_class = max(n_pos.keys()) + 1 prec = [None] * n_fg_class rec = [None] * n_fg_class for l in n_pos.keys(): score_l = np.array(score[l]) match_l = np.array(match[l], dtype=np.int8) order = score_l.argsort()[::-1] match_l = match_l[order] tp = np.cumsum(match_l == 1) fp = np.cumsum(match_l == 0) # If an element of fp + tp is 0, # the corresponding element of prec[l] is nan. prec[l] = tp / (fp + tp) # If n_pos[l] is 0, rec[l] is None. if n_pos[l] > 0: rec[l] = tp / n_pos[l] return prec, rec def calc_detection_voc_ap(prec, rec, use_07_metric=False): """Calculate average precisions based on evaluation code of PASCAL VOC. This function calculates average precisions from given precisions and recalls. The code is based on the evaluation code used in PASCAL VOC Challenge. Args: prec (list of numpy.array): A list of arrays. :obj:`prec[l]` indicates precision for class :math:`l`. If :obj:`prec[l]` is :obj:`None`, this function returns :obj:`numpy.nan` for class :math:`l`. rec (list of numpy.array): A list of arrays. :obj:`rec[l]` indicates recall for class :math:`l`. If :obj:`rec[l]` is :obj:`None`, this function returns :obj:`numpy.nan` for class :math:`l`. use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric for calculating average precision. The default value is :obj:`False`. Returns: ~numpy.ndarray: This function returns an array of average precisions. The :math:`l`-th value corresponds to the average precision for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is :obj:`None`, the corresponding value is set to :obj:`numpy.nan`. """ n_fg_class = len(prec) ap = np.empty(n_fg_class) for l in range(n_fg_class): if prec[l] is None or rec[l] is None: ap[l] = np.nan continue if use_07_metric: # 11 point metric ap[l] = 0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec[l] >= t) == 0: p = 0 else: p = np.max(np.nan_to_num(prec[l])[rec[l] >= t]) ap[l] += p / 11 else: # correct AP calculation # first append sentinel values at the end mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0])) mrec = np.concatenate(([0], rec[l], [1])) mpre = np.maximum.accumulate(mpre[::-1])[::-1] # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap
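As a toy walk-through of the use_07_metric branch of calc_detection_voc_ap above, the 11-point interpolation can be run on a hand-made precision/recall curve; the numbers below are invented for illustration only.

import numpy as np

prec = np.array([1.0, 1.0, 0.67, 0.75, 0.6])   # toy curve, already NaN-free
rec = np.array([0.2, 0.4, 0.4, 0.6, 0.6])

ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
    # max precision over all operating points with recall >= t, or 0 if none
    p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
    ap += p / 11
print(round(ap, 4))  # ~0.5909 for this toy curve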
PyTorch/Classification/ConvNets/triton
triton
deployer
#!/usr/bin/python

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import torch
import argparse

import triton.deployer_lib as deployer_lib


def get_model_args(model_args):
    """ the arguments initialize_model will receive """
    parser = argparse.ArgumentParser()
    ## Required parameters by the model.
    parser.add_argument(
        "--config",
        default="resnet50",
        type=str,
        required=True,
        help="Network to deploy",
    )
    parser.add_argument(
        "--checkpoint", default=None, type=str, help="The checkpoint of the model."
    )
    parser.add_argument(
        "--batch_size", default=1000, type=int, help="Batch size for inference"
    )
    parser.add_argument(
        "--fp16", default=False, action="store_true", help="FP16 inference"
    )
    parser.add_argument(
        "--dump_perf_data",
        type=str,
        default=None,
        help="Directory to dump perf data sample for testing",
    )
    return parser.parse_args(model_args)


def initialize_model(args):
    """ return model, ready to trace """
    from image_classification.resnet import build_resnet

    model = build_resnet(args.config, "fanin", 1000, fused_se=False)
    if args.checkpoint:
        state_dict = torch.load(args.checkpoint, map_location="cpu")
        # Strip the DataParallel "module." prefix (if present) before loading.
        model.load_state_dict(
            {k.replace("module.", ""): v for k, v in state_dict.items()}
        )
    return model.half() if args.fp16 else model


def get_dataloader(args):
    """ return dataloader for inference """
    from image_classification.dataloaders import get_synthetic_loader

    def data_loader():
        loader, _ = get_synthetic_loader(None, 128, 1000, True, fp16=args.fp16)
        processed = 0
        for inp, _ in loader:
            yield inp
            processed += 1
            if processed > 10:
                break

    return data_loader()


if __name__ == "__main__":
    # don't touch this!
    deployer, model_argv = deployer_lib.create_deployer(
        sys.argv[1:]
    )  # returns the deployer and the arguments it did not consume

    model_args = get_model_args(model_argv)

    model = initialize_model(model_args)
    dataloader = get_dataloader(model_args)

    if model_args.dump_perf_data:
        input_0 = next(iter(dataloader))
        if model_args.fp16:
            input_0 = input_0.half()

        os.makedirs(model_args.dump_perf_data, exist_ok=True)
        input_0.detach().cpu().numpy()[0].tofile(
            os.path.join(model_args.dump_perf_data, "input__0")
        )

    deployer.deploy(dataloader, model)
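The "module." stripping performed in initialize_model above is what makes checkpoints saved from torch.nn.DataParallel / DistributedDataParallel loadable into a plain module; a minimal sketch with a fake state dict (keys invented for illustration):

# Fake checkpoint keys, as they look after saving a DataParallel-wrapped model.
state_dict = {"module.conv1.weight": "w", "module.fc.bias": "b"}

cleaned = {k.replace("module.", "", 1): v for k, v in state_dict.items()}
print(cleaned)  # {'conv1.weight': 'w', 'fc.bias': 'b'}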
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection
object_detection
ops
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A module for helper tensorflow ops. This is originally implemented in TensorFlow Object Detection API. """ import tensorflow as tf from mrcnn_tf2.object_detection import shape_utils def indices_to_dense_vector(indices, size, indices_value=1., default_value=0, dtype=tf.float32): """Creates dense vector with indices set to specific value and rest to zeros. This function exists because it is unclear if it is safe to use tf.sparse_to_dense(indices, [size], 1, validate_indices=False) with indices which are not ordered. This function accepts a dynamic size (e.g. tf.shape(tensor)[0]) Args: indices: 1d Tensor with integer indices which are to be set to indices_values. size: scalar with size (integer) of output Tensor. indices_value: values of elements specified by indices in the output vector default_value: values of other elements in the output vector. dtype: data type. Returns: dense 1D Tensor of shape [size] with indices set to indices_values and the rest set to default_value. """ size = tf.cast(size, dtype=tf.int32) zeros = tf.ones([size], dtype=dtype) * default_value values = tf.ones_like(indices, dtype=dtype) * indices_value return tf.dynamic_stitch([tf.range(size), tf.cast(indices, dtype=tf.int32)], [zeros, values]) def matmul_gather_on_zeroth_axis(params, indices, scope=None): """Matrix multiplication based implementation of tf.gather on zeroth axis. TODO(rathodv, jonathanhuang): enable sparse matmul option. Args: params: A float32 Tensor. The tensor from which to gather values. Must be at least rank 1. indices: A Tensor. Must be one of the following types: int32, int64. Must be in range [0, params.shape[0]) scope: A name for the operation (optional). Returns: A Tensor. Has the same type as params. Values from params gathered from indices given by indices, with shape indices.shape + params.shape[1:]. """ params_shape = shape_utils.combined_static_and_dynamic_shape(params) indices_shape = shape_utils.combined_static_and_dynamic_shape(indices) params2d = tf.reshape(params, [params_shape[0], -1]) indicator_matrix = tf.one_hot(indices, params_shape[0]) gathered_result_flattened = tf.matmul(indicator_matrix, params2d) return tf.reshape(gathered_result_flattened, tf.stack(indices_shape + params_shape[1:]))
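matmul_gather_on_zeroth_axis above re-expresses a row gather as a one-hot matrix multiplication, which is friendlier to some accelerators; a hedged NumPy sketch of the same trick (values invented):

import numpy as np

params = np.arange(12, dtype=np.float32).reshape(4, 3)   # 4 rows to gather from
indices = np.array([2, 0, 2])

indicator = np.eye(params.shape[0], dtype=np.float32)[indices]  # one-hot rows, shape [3, 4]
gathered = indicator @ params                                   # shape [3, 3]

assert np.array_equal(gathered, params[indices])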
Tools/PyTorch/TimeSeriesPredictionPlatform/examples
examples
hp_search_multiobjective
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

python launch_training.py \
    -m \
    'model.config.n_head=choice(1,2,4)' \
    'trainer.optimizer.lr=tag(log, interval(1e-5, 1e-2))' \
    model=tft \
    dataset=electricity \
    trainer/criterion=quantile \
    trainer.config.batch_size=1024 \
    trainer.config.num_epochs=2 \
    trainer.config.log_interval=-1 \
    "evaluator.config.metrics=[P50, P90, MAE, MSE]" \
    +optuna_objectives=[P50,P90] \
    hydra/sweeper=optuna \
    hydra.sweeper.direction=[minimize,minimize] \
    hydra.sweeper.n_trials=3 \
    hydra.sweeper.n_jobs=1
PyTorch/Segmentation/nnUNet/data_loading
data_loading
dali_loader
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import numpy as np import nvidia.dali.fn as fn import nvidia.dali.math as math import nvidia.dali.ops as ops import nvidia.dali.types as types from nvidia.dali.pipeline import Pipeline from nvidia.dali.plugin.pytorch import DALIGenericIterator def random_augmentation(probability, augmented, original): condition = fn.cast(fn.random.coin_flip(probability=probability), dtype=types.DALIDataType.BOOL) neg_condition = condition ^ True return condition * augmented + neg_condition * original class GenericPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id) self.kwargs = kwargs self.dim = kwargs["dim"] self.device = device_id self.layout = kwargs["layout"] self.patch_size = kwargs["patch_size"] self.load_to_gpu = kwargs["load_to_gpu"] self.input_x = self.get_reader(kwargs["imgs"]) self.input_y = self.get_reader(kwargs["lbls"]) if kwargs["lbls"] is not None else None self.cdhw2dhwc = ops.Transpose(device="gpu", perm=[1, 2, 3, 0]) def get_reader(self, data): return ops.readers.Numpy( files=data, device="cpu", read_ahead=True, dont_use_mmap=True, pad_last_batch=True, shard_id=self.device, seed=self.kwargs["seed"], num_shards=self.kwargs["gpus"], shuffle_after_epoch=self.kwargs["shuffle"], ) def load_data(self): img = self.input_x(name="ReaderX") if self.load_to_gpu: img = img.gpu() img = fn.reshape(img, layout="CDHW") if self.input_y is not None: lbl = self.input_y(name="ReaderY") if self.load_to_gpu: lbl = lbl.gpu() lbl = fn.reshape(lbl, layout="CDHW") return img, lbl return img def make_dhwc_layout(self, img, lbl): img, lbl = self.cdhw2dhwc(img), self.cdhw2dhwc(lbl) return img, lbl def crop(self, data): return fn.crop(data, crop=self.patch_size, out_of_bounds_policy="pad") def crop_fn(self, img, lbl): img, lbl = self.crop(img), self.crop(lbl) return img, lbl def transpose_fn(self, img, lbl): img, lbl = fn.transpose(img, perm=(1, 0, 2, 3)), fn.transpose(lbl, perm=(1, 0, 2, 3)) return img, lbl class TrainPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.oversampling = kwargs["oversampling"] self.crop_shape = types.Constant(np.array(self.patch_size), dtype=types.INT64) self.crop_shape_float = types.Constant(np.array(self.patch_size), dtype=types.FLOAT) @staticmethod def slice_fn(img): return fn.slice(img, 1, 3, axes=[0]) def resize(self, data, interp_type): return fn.resize(data, interp_type=interp_type, size=self.crop_shape_float) def biased_crop_fn(self, img, label): roi_start, roi_end = fn.segmentation.random_object_bbox( label, device="cpu", background=0, format="start_end", cache_objects=True, foreground_prob=self.oversampling, ) anchor = fn.roi_random_crop(label, roi_start=roi_start, roi_end=roi_end, crop_shape=[1, *self.patch_size]) anchor = fn.slice(anchor, 1, 3, axes=[0]) # 
drop channels from anchor img, label = fn.slice( [img, label], anchor, self.crop_shape, axis_names="DHW", out_of_bounds_policy="pad", device="cpu" ) return img.gpu(), label.gpu() def zoom_fn(self, img, lbl): scale = random_augmentation(0.15, fn.random.uniform(range=(0.7, 1.0)), 1.0) d, h, w = [scale * x for x in self.patch_size] if self.dim == 2: d = self.patch_size[0] img, lbl = fn.crop(img, crop_h=h, crop_w=w, crop_d=d), fn.crop(lbl, crop_h=h, crop_w=w, crop_d=d) img, lbl = self.resize(img, types.DALIInterpType.INTERP_CUBIC), self.resize(lbl, types.DALIInterpType.INTERP_NN) return img, lbl def noise_fn(self, img): img_noised = img + fn.random.normal(img, stddev=fn.random.uniform(range=(0.0, 0.33))) return random_augmentation(0.15, img_noised, img) def blur_fn(self, img): img_blurred = fn.gaussian_blur(img, sigma=fn.random.uniform(range=(0.5, 1.5))) return random_augmentation(0.15, img_blurred, img) def brightness_fn(self, img): brightness_scale = random_augmentation(0.15, fn.random.uniform(range=(0.7, 1.3)), 1.0) return img * brightness_scale def contrast_fn(self, img): scale = random_augmentation(0.15, fn.random.uniform(range=(0.65, 1.5)), 1.0) return math.clamp(img * scale, fn.reductions.min(img), fn.reductions.max(img)) def flips_fn(self, img, lbl): kwargs = { "horizontal": fn.random.coin_flip(probability=0.5), "vertical": fn.random.coin_flip(probability=0.5), } if self.dim == 3: kwargs.update({"depthwise": fn.random.coin_flip(probability=0.5)}) return fn.flip(img, **kwargs), fn.flip(lbl, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.biased_crop_fn(img, lbl) img, lbl = self.zoom_fn(img, lbl) img, lbl = self.flips_fn(img, lbl) img = self.noise_fn(img) img = self.blur_fn(img) img = self.brightness_fn(img) img = self.contrast_fn(img) if self.dim == 2: img, lbl = self.transpose_fn(img, lbl) if self.layout == "NDHWC" and self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl class EvalPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.invert_resampled_y = kwargs["invert_resampled_y"] if self.invert_resampled_y: self.input_meta = self.get_reader(kwargs["meta"]) self.input_orig_y = self.get_reader(kwargs["orig_lbl"]) def define_graph(self): img, lbl = self.load_data() if self.invert_resampled_y: meta = self.input_meta(name="ReaderM") orig_lbl = self.input_orig_y(name="ReaderO") return img, lbl, meta, orig_lbl if self.layout == "NDHWC" and self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl class TritonPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.crop_fn(img, lbl) return img, lbl class TestPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.input_meta = self.get_reader(kwargs["meta"]) def define_graph(self): img = self.load_data() meta = self.input_meta(name="ReaderM") return img, meta class BenchmarkPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.crop_fn(img, lbl) if self.dim == 2: img, lbl = self.transpose_fn(img, lbl) if self.layout == "NDHWC" and 
self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl PIPELINES = { "train": TrainPipeline, "eval": EvalPipeline, "test": TestPipeline, "benchmark": BenchmarkPipeline, "triton": TritonPipeline, } class LightningWrapper(DALIGenericIterator): def __init__(self, pipe, **kwargs): super().__init__(pipe, **kwargs) def __next__(self): out = super().__next__()[0] return out def fetch_dali_loader(imgs, lbls, batch_size, mode, **kwargs): assert len(imgs) > 0, "Empty list of images!" if lbls is not None: assert len(imgs) == len(lbls), f"Number of images ({len(imgs)}) not matching number of labels ({len(lbls)})" if kwargs["benchmark"]: # Just to make sure the number of examples is large enough for benchmark run. batches = kwargs["test_batches"] if mode == "test" else kwargs["train_batches"] examples = batches * batch_size * kwargs["gpus"] imgs = list(itertools.chain(*(100 * [imgs])))[:examples] lbls = list(itertools.chain(*(100 * [lbls])))[:examples] mode = "benchmark" pipeline = PIPELINES[mode] shuffle = True if mode == "train" else False dynamic_shape = True if mode in ["eval", "test"] else False load_to_gpu = True if mode in ["eval", "test", "benchmark"] else False pipe_kwargs = {"imgs": imgs, "lbls": lbls, "load_to_gpu": load_to_gpu, "shuffle": shuffle, **kwargs} output_map = ["image", "meta"] if mode == "test" else ["image", "label"] if kwargs["dim"] == 2 and mode in ["train", "benchmark"]: batch_size_2d = batch_size // kwargs["nvol"] if mode == "train" else batch_size batch_size = kwargs["nvol"] if mode == "train" else 1 pipe_kwargs.update({"patch_size": [batch_size_2d] + kwargs["patch_size"]}) rank = int(os.getenv("LOCAL_RANK", "0")) if mode == "eval": # We sharded the data for evaluation manually. rank = 0 pipe_kwargs["gpus"] = 1 pipe = pipeline(batch_size, kwargs["num_workers"], rank, **pipe_kwargs) return LightningWrapper( pipe, auto_reset=True, reader_name="ReaderX", output_map=output_map, dynamic_shape=dynamic_shape, )
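The random_augmentation helper at the top of this file blends the augmented and original samples with a Bernoulli gate rather than control flow, since branching is not available inside the DALI graph; a stand-alone NumPy sketch of that gating (DALI itself is not required here):

import numpy as np

rng = np.random.default_rng(0)

def random_augmentation(probability, augmented, original):
    # Mirror the graph-friendly arithmetic blend used above:
    # a Bernoulli gate selects the augmented sample with the given probability.
    condition = rng.random() < probability
    return condition * augmented + (1 - condition) * original

x = np.ones((2, 2), dtype=np.float32)
print(random_augmentation(0.15, x * 2.0, x))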
PyTorch/SpeechSynthesis/FastPitch/triton
triton
model
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import sys from os.path import abspath, dirname sys.path.append(abspath(dirname(__file__)+'/../')) from common.text import symbols from inference import load_model_from_ckpt import models from torch.utils.data import DataLoader import torch import numpy as np def update_argparser(parser): ### copy-paste from ./fastpitch/arg_parser.py io = parser.add_argument_group('io parameters') io.add_argument('--n-mel-channels', default=80, type=int, help='Number of bins in mel-spectrograms') symbols = parser.add_argument_group('symbols parameters') symbols.add_argument('--n-symbols', default=148, type=int, help='Number of symbols in dictionary') symbols.add_argument('--padding-idx', default=0, type=int, help='Index of padding symbol in dictionary') symbols.add_argument('--symbols-embedding-dim', default=384, type=int, help='Input embedding dimension') text_processing = parser.add_argument_group('Text processing parameters') text_processing.add_argument('--symbol-set', type=str, default='english_basic', help='Define symbol set for input text') in_fft = parser.add_argument_group('input FFT parameters') in_fft.add_argument('--in-fft-n-layers', default=6, type=int, help='Number of FFT blocks') in_fft.add_argument('--in-fft-n-heads', default=1, type=int, help='Number of attention heads') in_fft.add_argument('--in-fft-d-head', default=64, type=int, help='Dim of attention heads') in_fft.add_argument('--in-fft-conv1d-kernel-size', default=3, type=int, help='Conv-1D kernel size') in_fft.add_argument('--in-fft-conv1d-filter-size', default=1536, type=int, help='Conv-1D filter size') in_fft.add_argument('--in-fft-output-size', default=384, type=int, help='Output dim') in_fft.add_argument('--p-in-fft-dropout', default=0.1, type=float, help='Dropout probability') in_fft.add_argument('--p-in-fft-dropatt', default=0.1, type=float, help='Multi-head attention dropout') in_fft.add_argument('--p-in-fft-dropemb', default=0.0, type=float, help='Dropout added to word+positional embeddings') out_fft = parser.add_argument_group('output FFT parameters') out_fft.add_argument('--out-fft-n-layers', default=6, type=int, help='Number of FFT blocks') out_fft.add_argument('--out-fft-n-heads', default=1, type=int, help='Number of attention heads') out_fft.add_argument('--out-fft-d-head', default=64, type=int, help='Dim of attention head') out_fft.add_argument('--out-fft-conv1d-kernel-size', default=3, type=int, help='Conv-1D kernel size') out_fft.add_argument('--out-fft-conv1d-filter-size', default=1536, type=int, help='Conv-1D filter size') out_fft.add_argument('--out-fft-output-size', default=384, type=int, help='Output dim') out_fft.add_argument('--p-out-fft-dropout', default=0.1, type=float, help='Dropout probability for out_fft') out_fft.add_argument('--p-out-fft-dropatt', default=0.1, type=float, help='Multi-head attention dropout') out_fft.add_argument('--p-out-fft-dropemb', default=0.0, type=float, help='Dropout added 
to word+positional embeddings') dur_pred = parser.add_argument_group('duration predictor parameters') dur_pred.add_argument('--dur-predictor-kernel-size', default=3, type=int, help='Duration predictor conv-1D kernel size') dur_pred.add_argument('--dur-predictor-filter-size', default=256, type=int, help='Duration predictor conv-1D filter size') dur_pred.add_argument('--p-dur-predictor-dropout', default=0.1, type=float, help='Dropout probability for duration predictor') dur_pred.add_argument('--dur-predictor-n-layers', default=2, type=int, help='Number of conv-1D layers') pitch_pred = parser.add_argument_group('pitch predictor parameters') pitch_pred.add_argument('--pitch-predictor-kernel-size', default=3, type=int, help='Pitch predictor conv-1D kernel size') pitch_pred.add_argument('--pitch-predictor-filter-size', default=256, type=int, help='Pitch predictor conv-1D filter size') pitch_pred.add_argument('--p-pitch-predictor-dropout', default=0.1, type=float, help='Pitch probability for pitch predictor') pitch_pred.add_argument('--pitch-predictor-n-layers', default=2, type=int, help='Number of conv-1D layers') energy_pred = parser.add_argument_group('energy predictor parameters') energy_pred.add_argument('--energy-conditioning', type=bool, default=True) energy_pred.add_argument('--energy-predictor-kernel-size', default=3, type=int, help='Pitch predictor conv-1D kernel size') energy_pred.add_argument('--energy-predictor-filter-size', default=256, type=int, help='Pitch predictor conv-1D filter size') energy_pred.add_argument('--p-energy-predictor-dropout', default=0.1, type=float, help='Pitch probability for energy predictor') energy_pred.add_argument('--energy-predictor-n-layers', default=2, type=int, help='Number of conv-1D layers') ###~copy-paste from ./fastpitch/arg_parser.py parser.add_argument('--checkpoint', type=str, help='Full path to the FastPitch checkpoint file') parser.add_argument('--torchscript', action='store_true', help='Apply TorchScript') parser.add_argument('--ema', action='store_true', help='Use EMA averaged model \ (if saved in checkpoints)') cond = parser.add_argument_group('conditioning parameters') cond.add_argument('--pitch-embedding-kernel-size', default=3, type=int, help='Pitch embedding conv-1D kernel size') cond.add_argument('--energy-embedding-kernel-size', default=3, type=int, help='Pitch embedding conv-1D kernel size') cond.add_argument('--speaker-emb-weight', type=float, default=1.0, help='Scale speaker embedding') cond.add_argument('--n-speakers', type=int, default=1, help='Number of speakers in the model.') cond.add_argument('--pitch-conditioning-formants', default=1, type=int, help='Number of speech formants to condition on.') parser.add_argument("--precision", type=str, default="fp32", choices=["fp32", "fp16"], help="PyTorch model precision") parser.add_argument("--output-format", type=str, required=True, help="Output format") def get_model(**model_args): import argparse args = argparse.Namespace(**model_args) model_config = models.get_model_config(model_name="FastPitch", args=args) jittable = True if 'ts-' in args.output_format else False model = models.get_model(model_name="FastPitch", model_config=model_config, device='cuda', forward_is_infer=True, jitable=jittable) model = load_model_from_ckpt(args.checkpoint, args.ema, model) if args.precision == "fp16": model = model.half() model.eval() tensor_names = {"inputs": ["INPUT__0"], "outputs" : ["OUTPUT__0", "OUTPUT__1", "OUTPUT__2", "OUTPUT__3", "OUTPUT__4"]} return model, tensor_names
PyTorch/SpeechRecognition/QuartzNet/utils
utils
convert_librispeech
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import argparse import os import glob import multiprocessing import json import pandas as pd from preprocessing_utils import parallel_preprocess parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.') parser.add_argument('--input_dir', type=str, required=True, help='LibriSpeech collection input dir') parser.add_argument('--dest_dir', type=str, required=True, help='Output dir') parser.add_argument('--output_json', type=str, default='./', help='name of the output json file.') parser.add_argument('-s','--speed', type=float, nargs='*', help='Speed perturbation ratio') parser.add_argument('--target_sr', type=int, default=None, help='Target sample rate. ' 'defaults to the input sample rate') parser.add_argument('--overwrite', action='store_true', help='Overwrite file if exists') parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(), help='Number of threads to use when processing audio files') args = parser.parse_args() args.input_dir = args.input_dir.rstrip('/') args.dest_dir = args.dest_dir.rstrip('/') def build_input_arr(input_dir): txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'), recursive=True) input_data = [] for txt_file in txt_files: rel_path = os.path.relpath(txt_file, input_dir) with open(txt_file) as fp: for line in fp: fname, _, transcript = line.partition(' ') input_data.append(dict(input_relpath=os.path.dirname(rel_path), input_fname=fname+'.flac', transcript=transcript)) return input_data print("[%s] Scaning input dir..." % args.output_json) dataset = build_input_arr(input_dir=args.input_dir) print("[%s] Converting audio files..." % args.output_json) dataset = parallel_preprocess(dataset=dataset, input_dir=args.input_dir, dest_dir=args.dest_dir, target_sr=args.target_sr, speed=args.speed, overwrite=args.overwrite, parallel=args.parallel) print("[%s] Generating json..." % args.output_json) df = pd.DataFrame(dataset, dtype=object) # Save json with python. df.to_json() produces back slashed in file paths dataset = df.to_dict(orient='records') with open(args.output_json, 'w') as fp: json.dump(dataset, fp, indent=2)
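Each line of a LibriSpeech *.trans.txt file has the form "<utterance-id> <transcript>", which is what the str.partition call in build_input_arr relies on; a tiny sketch with an invented line and an invented relative path:

line = "84-121123-0000 GO DO YOU HEAR"  # invented example line
fname, _, transcript = line.partition(' ')
entry = dict(input_relpath="84/121123",  # would come from the file's location on disk
             input_fname=fname + '.flac',
             transcript=transcript)
print(entry)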
PyTorch/SpeechSynthesis/FastPitch/hifigan
hifigan
denoiser
import torch

from .stft import STFT


class Denoiser(torch.nn.Module):
    """ Removes model bias from audio produced with hifigan """

    def __init__(self, hifigan, filter_length=1024, n_overlap=4,
                 win_length=1024, mode='zeros'):
        super(Denoiser, self).__init__()
        self.stft = STFT(filter_length=filter_length,
                         hop_length=int(filter_length/n_overlap),
                         win_length=win_length).cuda()
        if mode == 'zeros':
            mel_input = torch.zeros(
                (1, 80, 88),
                dtype=hifigan.ups[0].weight.dtype,
                device=hifigan.ups[0].weight.device)
        elif mode == 'normal':
            mel_input = torch.randn(
                (1, 80, 88),
                dtype=hifigan.ups[0].weight.dtype,
                device=hifigan.ups[0].weight.device)
        else:
            raise Exception("Mode {} is not supported".format(mode))

        with torch.no_grad():
            bias_audio = hifigan(mel_input).float()[0]
            bias_spec, _ = self.stft.transform(bias_audio)

        self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])

    def forward(self, audio, strength=0.1):
        audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
        audio_spec_denoised = audio_spec - self.bias_spec * strength
        audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
        audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
        return audio_denoised
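The core of Denoiser.forward is plain spectral subtraction: scale the cached bias magnitude spectrum, subtract it, and clamp at zero; a NumPy-only sketch of that step (shapes and values invented):

import numpy as np

rng = np.random.default_rng(0)
audio_spec = np.abs(rng.normal(size=(513, 100))).astype(np.float32)  # fake magnitudes
bias_spec = np.full((513, 1), 0.05, dtype=np.float32)                # fake cached bias
strength = 0.1

audio_spec_denoised = np.clip(audio_spec - bias_spec * strength, 0.0, None)
print(audio_spec_denoised.min() >= 0.0)  # True: negative magnitudes are clamped away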
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
facessd_mobilenet_v2_quantized_320x320_open_image_v4
# Quantized trained SSD with Mobilenet v2 on Open Images v4. # Non-face boxes are dropped during training and non-face groundtruth boxes are # ignored when evaluating. # # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { ssd { num_classes: 1 image_resizer { fixed_shape_resizer { height: 320 width: 320 } } feature_extractor { type: "ssd_mobilenet_v2" depth_multiplier: 1.0 min_depth: 16 conv_hyperparams { regularizer { l2_regularizer { weight: 4.0e-05 } } initializer { truncated_normal_initializer { mean: 0.0 stddev: 0.03 } } activation: RELU_6 batch_norm { decay: 0.9997 center: true scale: true epsilon: 0.001 train: true } } pad_to_multiple: 32 use_explicit_padding: true } box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { weight: 4.0e-05 } } initializer { truncated_normal_initializer { mean: 0.0 stddev: 0.03 } } activation: RELU_6 batch_norm { decay: 0.9997 center: true scale: true epsilon: 0.001 train: true } } min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false kernel_size: 3 box_code_size: 4 apply_sigmoid_to_scores: false } } anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 height_stride: 16 height_stride: 32 height_stride: 64 height_stride: 128 height_stride: 256 height_stride: 512 width_stride: 16 width_stride: 32 width_stride: 64 width_stride: 128 width_stride: 256 width_stride: 512 } } post_processing { batch_non_max_suppression { score_threshold: 1.0e-08 iou_threshold: 0.5 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } normalize_loss_by_num_matches: true loss { localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_sigmoid { } } hard_example_miner { num_hard_examples: 3000 iou_threshold: 0.99 loss_type: CLASSIFICATION max_negatives_per_positive: 3 min_negatives_per_image: 10 } classification_weight: 1.0 localization_weight: 1.0 } } } train_config { batch_size: 32 data_augmentation_options { random_horizontal_flip { keypoint_flip_permutation: 1 keypoint_flip_permutation: 0 keypoint_flip_permutation: 2 keypoint_flip_permutation: 3 keypoint_flip_permutation: 5 keypoint_flip_permutation: 4 } } data_augmentation_options { ssd_random_crop_fixed_aspect_ratio { } } optimizer { rms_prop_optimizer { learning_rate { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "" } train_input_reader { label_map_path: "PATH_TO_BE_CONFIGURED/face_label_map.pbtxt" tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/face_train.record-?????-of-00100" } } eval_config { metrics_set: "coco_detection_metrics" use_moving_averages: true } eval_input_reader { label_map_path: "PATH_TO_BE_CONFIGURED/face_label_map.pbtxt" shuffle: false num_readers: 1 tf_record_input_reader { input_path: 
"PATH_TO_BE_CONFIGURED/face_val.record-?????-of-00010" } } graph_rewriter { quantization { delay: 500000 weight_bits: 8 activation_bits: 8 } }
TensorFlow/Segmentation/UNet_Industrial/model/layers
layers
__init__
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== from model.layers.utils import _log_hparams from model.layers.activation import crelu from model.layers.activation import elu from model.layers.activation import leaky_relu from model.layers.activation import prelu from model.layers.activation import relu from model.layers.activation import relu6 from model.layers.activation import selu from model.layers.activation import sigmoid from model.layers.activation import softmax from model.layers.activation import tanh from model.layers.conv2d import conv2d from model.layers.deconv2d import deconv2d from model.layers.dense import dense from model.layers.drop_layers import dropout from model.layers.math_ops import reduce_mean from model.layers.normalization import batch_norm from model.layers.padding import pad from model.layers.pooling import average_pooling2d from model.layers.pooling import max_pooling2d from model.layers.array_ops import concat from model.layers.array_ops import flatten from model.layers.array_ops import reshape from model.layers.array_ops import squeeze from model.layers.array_ops import upscale_2d __all__ = [ # activation layers 'crelu', 'elu', 'leaky_relu', 'prelu', 'relu', 'relu6', 'selu', 'sigmoid', 'softmax', 'tanh', # array ops 'concat', 'flatten', 'reshape', 'squeeze', 'upscale_2d', # conv layers 'conv2d', # deconv layers 'deconv2d', # dense layers 'dense', # drop layers 'dropout', # math_ops layers 'reduce_mean', # normalization layers 'batch_norm', # padding layers 'pad', # pooling layers 'average_pooling2d', 'max_pooling2d', ]
TensorFlow/Translation/GNMT/examples
examples
DGXA100_TF32_8GPU
python nmt.py --output_dir=results --batch_size=1024 --learning_rate=2e-3 --num_gpus=8
TensorFlow/Classification/ConvNets/resnet50v1.5/training
training
DGX1_RN50_FP32_90E
#!/bin/bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. WORKSPACE=${1:-"/workspace/rn50v15_tf"} DATA_DIR=${2:-"/data"} OTHER=${@:3} if [[ ! -z "${BIND_TO_SOCKET}" ]]; then BIND_TO_SOCKET="--bind-to socket" fi mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnet50 \ --mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \ --batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \ --lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \ --data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \ --results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/common
common
logging
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef TENSORRT_LOGGING_H #define TENSORRT_LOGGING_H #include "NvInferRuntimeCommon.h" #include <cassert> #include <ctime> #include <iomanip> #include <iostream> #include <ostream> #include <sstream> #include <string> using Severity = nvinfer1::ILogger::Severity; class LogStreamConsumerBuffer : public std::stringbuf { public: LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog) : mOutput(stream) , mPrefix(prefix) , mShouldLog(shouldLog) { } LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other) : mOutput(other.mOutput) , mPrefix(std::move(other.mPrefix)) , mShouldLog(std::move(other.mShouldLog)) { } ~LogStreamConsumerBuffer() { // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence // std::streambuf::pptr() gives a pointer to the current position of the output sequence // if the pointer to the beginning is not equal to the pointer to the current position, // call putOutput() to log the output to the stream if (pbase() != pptr()) { putOutput(); } } // synchronizes the stream buffer and returns 0 on success // synchronizing the stream buffer consists of inserting the buffer contents into the stream, // resetting the buffer and flushing the stream virtual int sync() { putOutput(); return 0; } void putOutput() { if (mShouldLog) { // prepend timestamp std::time_t timestamp = std::time(nullptr); tm* tm_local = std::localtime(&timestamp); std::cout << "["; std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mon << "/"; std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/"; std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-"; std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":"; std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":"; std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] "; // std::stringbuf::str() gets the string contents of the buffer // insert the buffer contents pre-appended by the appropriate prefix into the stream mOutput << mPrefix << str(); // set the buffer to empty str(""); // flush the stream mOutput.flush(); } } void setShouldLog(bool shouldLog) { mShouldLog = shouldLog; } private: std::ostream& mOutput; std::string mPrefix; bool mShouldLog; }; //! //! \class LogStreamConsumerBase //! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer //! class LogStreamConsumerBase { public: LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) : mBuffer(stream, prefix, shouldLog) { } protected: LogStreamConsumerBuffer mBuffer; }; //! //! \class LogStreamConsumer //! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. //! Order of base classes is LogStreamConsumerBase and then std::ostream. //! 
This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field //! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. //! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. //! Please do not change the order of the parent classes. //! class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream { public: //! \brief Creates a LogStreamConsumer which logs messages with level severity. //! Reportable severity determines if the messages are severe enough to be logged. LogStreamConsumer(Severity reportableSeverity, Severity severity) : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) , std::ostream(&mBuffer) // links the stream buffer with the stream , mShouldLog(severity <= reportableSeverity) , mSeverity(severity) { } LogStreamConsumer(LogStreamConsumer&& other) : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) , std::ostream(&mBuffer) // links the stream buffer with the stream , mShouldLog(other.mShouldLog) , mSeverity(other.mSeverity) { } void setReportableSeverity(Severity reportableSeverity) { mShouldLog = mSeverity <= reportableSeverity; mBuffer.setShouldLog(mShouldLog); } private: static std::ostream& severityOstream(Severity severity) { return severity >= Severity::kINFO ? std::cout : std::cerr; } static std::string severityPrefix(Severity severity) { switch (severity) { case Severity::kINTERNAL_ERROR: return "[F] "; case Severity::kERROR: return "[E] "; case Severity::kWARNING: return "[W] "; case Severity::kINFO: return "[I] "; case Severity::kVERBOSE: return "[V] "; default: assert(0); return ""; } } bool mShouldLog; Severity mSeverity; }; //! \class Logger //! //! \brief Class which manages logging of TensorRT tools and samples //! //! \details This class provides a common interface for TensorRT tools and samples to log information to the console, //! and supports logging two types of messages: //! //! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) //! - Test pass/fail messages //! //! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is //! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. //! //! In the future, this class could be extended to support dumping test results to a file in some standard format //! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). //! //! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger //! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT //! library and messages coming from the sample. //! //! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the //! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger //! object. class Logger : public nvinfer1::ILogger { public: Logger(Severity severity = Severity::kWARNING) : mReportableSeverity(severity) { } //! //! \enum TestResult //! \brief Represents the state of a given test //! 
enum class TestResult { kRUNNING, //!< The test is running kPASSED, //!< The test passed kFAILED, //!< The test failed kWAIVED //!< The test was waived }; //! //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger //! \return The nvinfer1::ILogger associated with this Logger //! //! TODO Once all samples are updated to use this method to register the logger with TensorRT, //! we can eliminate the inheritance of Logger from ILogger //! nvinfer1::ILogger& getTRTLogger() { return *this; } //! //! \brief Implementation of the nvinfer1::ILogger::log() virtual method //! //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the //! inheritance from nvinfer1::ILogger //! void log(Severity severity, const char* msg) override { LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; } //! //! \brief Method for controlling the verbosity of logging output //! //! \param severity The logger will only emit messages that have severity of this level or higher. //! void setReportableSeverity(Severity severity) { mReportableSeverity = severity; } //! //! \brief Opaque handle that holds logging information for a particular test //! //! This object is an opaque handle to information used by the Logger to print test results. //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used //! with Logger::reportTest{Start,End}(). //! class TestAtom { public: TestAtom(TestAtom&&) = default; private: friend class Logger; TestAtom(bool started, const std::string& name, const std::string& cmdline) : mStarted(started) , mName(name) , mCmdline(cmdline) { } bool mStarted; std::string mName; std::string mCmdline; }; //! //! \brief Define a test for logging //! //! \param[in] name The name of the test. This should be a string starting with //! "TensorRT" and containing dot-separated strings containing //! the characters [A-Za-z0-9_]. //! For example, "TensorRT.sample_googlenet" //! \param[in] cmdline The command line used to reproduce the test // //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). //! static TestAtom defineTest(const std::string& name, const std::string& cmdline) { return TestAtom(false, name, cmdline); } //! //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments //! as input //! //! \param[in] name The name of the test //! \param[in] argc The number of command-line arguments //! \param[in] argv The array of command-line arguments (given as C strings) //! //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). static TestAtom defineTest(const std::string& name, int argc, char const* const* argv) { auto cmdline = genCmdlineString(argc, argv); return defineTest(name, cmdline); } //! //! \brief Report that a test has started. //! //! \pre reportTestStart() has not been called yet for the given testAtom //! //! \param[in] testAtom The handle to the test that has started //! static void reportTestStart(TestAtom& testAtom) { reportTestResult(testAtom, TestResult::kRUNNING); assert(!testAtom.mStarted); testAtom.mStarted = true; } //! //! \brief Report that a test has ended. //! //! \pre reportTestStart() has been called for the given testAtom //! //! \param[in] testAtom The handle to the test that has ended //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, //! TestResult::kFAILED, TestResult::kWAIVED //! 
static void reportTestEnd(const TestAtom& testAtom, TestResult result) { assert(result != TestResult::kRUNNING); assert(testAtom.mStarted); reportTestResult(testAtom, result); } static int reportPass(const TestAtom& testAtom) { reportTestEnd(testAtom, TestResult::kPASSED); return EXIT_SUCCESS; } static int reportFail(const TestAtom& testAtom) { reportTestEnd(testAtom, TestResult::kFAILED); return EXIT_FAILURE; } static int reportWaive(const TestAtom& testAtom) { reportTestEnd(testAtom, TestResult::kWAIVED); return EXIT_SUCCESS; } static int reportTest(const TestAtom& testAtom, bool pass) { return pass ? reportPass(testAtom) : reportFail(testAtom); } Severity getReportableSeverity() const { return mReportableSeverity; } private: //! //! \brief returns an appropriate string for prefixing a log message with the given severity //! static const char* severityPrefix(Severity severity) { switch (severity) { case Severity::kINTERNAL_ERROR: return "[F] "; case Severity::kERROR: return "[E] "; case Severity::kWARNING: return "[W] "; case Severity::kINFO: return "[I] "; case Severity::kVERBOSE: return "[V] "; default: assert(0); return ""; } } //! //! \brief returns an appropriate string for prefixing a test result message with the given result //! static const char* testResultString(TestResult result) { switch (result) { case TestResult::kRUNNING: return "RUNNING"; case TestResult::kPASSED: return "PASSED"; case TestResult::kFAILED: return "FAILED"; case TestResult::kWAIVED: return "WAIVED"; default: assert(0); return ""; } } //! //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity //! static std::ostream& severityOstream(Severity severity) { return severity >= Severity::kINFO ? std::cout : std::cerr; } //! //! \brief method that implements logging test results //! static void reportTestResult(const TestAtom& testAtom, TestResult result) { severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " << testAtom.mCmdline << std::endl; } //! //! \brief generate a command line string from the given (argc, argv) values //! static std::string genCmdlineString(int argc, char const* const* argv) { std::stringstream ss; for (int i = 0; i < argc; i++) { if (i > 0) ss << " "; ss << argv[i]; } return ss.str(); } Severity mReportableSeverity; }; namespace { //! //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE //! //! Example usage: //! //! LOG_VERBOSE(logger) << "hello world" << std::endl; //! inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) { return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); } //! //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO //! //! Example usage: //! //! LOG_INFO(logger) << "hello world" << std::endl; //! inline LogStreamConsumer LOG_INFO(const Logger& logger) { return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); } //! //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING //! //! Example usage: //! //! LOG_WARN(logger) << "hello world" << std::endl; //! inline LogStreamConsumer LOG_WARN(const Logger& logger) { return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); } //! //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR //! //! Example usage: //! //! LOG_ERROR(logger) << "hello world" << std::endl; //! 
inline LogStreamConsumer LOG_ERROR(const Logger& logger) { return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); } //! //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR // ("fatal" severity) //! //! Example usage: //! //! LOG_FATAL(logger) << "hello world" << std::endl; //! inline LogStreamConsumer LOG_FATAL(const Logger& logger) { return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); } } // anonymous namespace #endif // TENSORRT_LOGGING_H
PyTorch/Segmentation/MaskRCNN/pytorch/scripts
scripts
inference
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. #Predictions will be stored in `FOLDER/inference` #1x8x4 DGX1V GPU=1 # uncomment below to use default # CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml' CONFIG="$1" #This folder should contain a file called 'last_checkpoint' which contains the path to the actual checkpoint FOLDER='/results' #Example # /results # ------last_checkpoint # ------model.pth # # last_checkpoint #----------------------------- #|/results/model.pth | #| | #| | #| | #| | #| | #----------------------------- LOGFILE="$FOLDER/joblog.log" if ! [ -d "$FOLDER" ]; then mkdir $FOLDER; fi #Use a different argument with DATASETS.TEST to use your own dataset python3 -m torch.distributed.launch --nproc_per_node=$GPU tools/test_net.py \ --config-file $CONFIG \ --skip-eval \ DTYPE "float16" \ DATASETS.TEST "(\"coco_2017_val\",)" \ OUTPUT_DIR $FOLDER \ TEST.IMS_PER_BATCH 1 \ | tee $LOGFILE
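Note, added for illustration: the script above expects FOLDER ('/results') to contain a 'last_checkpoint' file whose only line is the path to the trained weights. Below is a minimal sketch that prepares that layout; the helper name write_last_checkpoint and the default paths are assumptions taken from the comments in the script, not part of the repository.

# Sketch: create <results_dir>/last_checkpoint pointing at an existing checkpoint file.
from pathlib import Path

def write_last_checkpoint(results_dir="/results", checkpoint_name="model.pth"):
    results = Path(results_dir)
    results.mkdir(parents=True, exist_ok=True)
    checkpoint = results / checkpoint_name
    if not checkpoint.exists():
        raise FileNotFoundError(f"expected a checkpoint at {checkpoint}")
    # The inference script reads this file to locate the weights.
    (results / "last_checkpoint").write_text(f"{checkpoint}\n")

if __name__ == "__main__":
    write_last_checkpoint()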
PyTorch/SpeechRecognition/QuartzNet/utils
utils
download_librispeech
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import os import argparse import pandas as pd from download_utils import download_file, md5_checksum, extract parser = argparse.ArgumentParser(description='Download, verify and extract dataset files') parser.add_argument('csv', type=str, help='CSV file with urls and checksums to download.') parser.add_argument('dest', type=str, help='Download destination folder.') parser.add_argument('-e', type=str, default=None, help='Extraction destination folder. Defaults to download folder if not provided') parser.add_argument('--skip_download', action='store_true', help='Skip downloading the files') parser.add_argument('--skip_checksum', action='store_true', help='Skip checksum') parser.add_argument('--skip_extract', action='store_true', help='Skip extracting files') args = parser.parse_args() args.e = args.e or args.dest df = pd.read_csv(args.csv, delimiter=',') if not args.skip_download: for url in df.url: fname = url.split('/')[-1] print("Downloading %s:" % fname) download_file(url=url, dest_folder=args.dest, fname=fname) else: print("Skipping file download") if not args.skip_checksum: for index, row in df.iterrows(): url = row['url'] md5 = row['md5'] fname = url.split('/')[-1] fpath = os.path.join(args.dest, fname) print("Verifying %s: " % fname, end='') ret = md5_checksum(fpath=fpath, target_hash=md5) print("Passed" if ret else "Failed") else: print("Skipping checksum") if not args.skip_extract: for url in df.url: fname = url.split('/')[-1] fpath = os.path.join(args.dest, fname) print("Decompressing %s:" % fpath) extract(fpath=fpath, dest_folder=args.e) else: print("Skipping file extraction")
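For reference, a hedged sketch of what the md5_checksum helper imported from download_utils might look like. The actual implementation in download_utils may differ; the verification step amounts to hashing the file in chunks and comparing hex digests against the value from the CSV.

import hashlib

def md5_checksum(fpath, target_hash, chunk_size=1024 * 1024):
    """Return True if the MD5 hex digest of `fpath` matches `target_hash`."""
    md5 = hashlib.md5()
    with open(fpath, "rb") as fp:
        # Read in fixed-size chunks so large archives do not have to fit in memory.
        for chunk in iter(lambda: fp.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest() == target_hash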
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_INFER_TF-AMP
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches U-Net inference in TF-AMP (FP16 mixed precision) on 1 GPU with batch size 1. Usage: # bash unet_INFER_TF-AMP.sh <path to dataset> <path to results directory> horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size 1 --exec_mode predict --xla --amp
TensorFlow2/LanguageModeling/BERT
BERT
squad_lib_sp
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Run ALBERT on SQuAD 1.1 and SQuAD 2.0 using sentence piece tokenization. The file is forked from: https://github.com/google-research/ALBERT/blob/master/run_squad_sp.py """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math from absl import logging import numpy as np import tensorflow as tf import tokenization class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, paragraph_text, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.paragraph_text = paragraph_text self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) s += ", question_text: %s" % ( tokenization.printable_text(self.question_text)) s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tok_start_to_orig_index, tok_end_to_orig_index, token_is_max_context, tokens, input_ids, input_mask, segment_ids, paragraph_len, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tok_start_to_orig_index = tok_start_to_orig_index self.tok_end_to_orig_index = tok_end_to_orig_index self.token_is_max_context = token_is_max_context self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.paragraph_len = paragraph_len self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" del version_2_with_negative with tf.io.gfile.GFile(input_file, "r") as reader: input_data = json.load(reader)["data"] examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None orig_answer_text = None is_impossible = False if is_training: is_impossible = 
qa.get("is_impossible", False) if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] start_position = answer["answer_start"] else: start_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, paragraph_text=paragraph_text, orig_answer_text=orig_answer_text, start_position=start_position, is_impossible=is_impossible) examples.append(example) return examples def _convert_index(index, pos, m=None, is_start=True): """Converts index.""" if index[pos] is not None: return index[pos] n = len(index) rear = pos while rear < n - 1 and index[rear] is None: rear += 1 front = pos while front > 0 and index[front] is None: front -= 1 assert index[front] is not None or index[rear] is not None if index[front] is None: if index[rear] >= 1: if is_start: return 0 else: return index[rear] - 1 return index[rear] if index[rear] is None: if m is not None and index[front] < m - 1: if is_start: return index[front] + 1 else: return m - 1 return index[front] if is_start: if index[rear] > index[front] + 1: return index[front] + 1 else: return index[rear] else: if index[rear] > index[front] + 1: return index[rear] - 1 else: return index[front] def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn, do_lower_case, batch_size=None): """Loads a data file into a list of `InputBatch`s.""" cnt_pos, cnt_neg = 0, 0 base_id = 1000000000 unique_id = base_id max_n, max_m = 1024, 1024 f = np.zeros((max_n, max_m), dtype=np.float32) for (example_index, example) in enumerate(examples): if example_index % 100 == 0: logging.info("Converting %d/%d pos %d neg %d", example_index, len(examples), cnt_pos, cnt_neg) query_tokens = tokenization.encode_ids( tokenizer.sp_model, tokenization.preprocess_text( example.question_text, lower=do_lower_case)) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] paragraph_text = example.paragraph_text para_tokens = tokenization.encode_pieces( tokenizer.sp_model, tokenization.preprocess_text( example.paragraph_text, lower=do_lower_case)) chartok_to_tok_index = [] tok_start_to_chartok_index = [] tok_end_to_chartok_index = [] char_cnt = 0 for i, token in enumerate(para_tokens): new_token = token.replace(tokenization.SPIECE_UNDERLINE, " ") chartok_to_tok_index.extend([i] * len(new_token)) tok_start_to_chartok_index.append(char_cnt) char_cnt += len(new_token) tok_end_to_chartok_index.append(char_cnt - 1) tok_cat_text = "".join(para_tokens).replace(tokenization.SPIECE_UNDERLINE, " ") n, m = len(paragraph_text), len(tok_cat_text) if n > max_n or m > max_m: max_n = max(n, max_n) max_m = max(m, max_m) f = np.zeros((max_n, max_m), dtype=np.float32) g = {} # pylint: disable=cell-var-from-loop def _lcs_match(max_dist, n=n, m=m): """Longest-common-substring algorithm.""" f.fill(0) g.clear() ### longest common sub sequence # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) for i in range(n): # unlike standard LCS, this is specifically optimized for the setting # because the mismatch between sentence pieces and original text will # be small for j in range(i - max_dist, i + max_dist): if j >= m or j < 0: continue if i > 0: g[(i, j)] = 0 f[i, j] = f[i - 1, j] if j > 0 and f[i, j - 1] > f[i, j]: g[(i, j)] = 1 f[i, j] = f[i, j - 1] f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 if 
(tokenization.preprocess_text( paragraph_text[i], lower=do_lower_case, remove_space=False) == tok_cat_text[j] and f_prev + 1 > f[i, j]): g[(i, j)] = 2 f[i, j] = f_prev + 1 # pylint: enable=cell-var-from-loop max_dist = abs(n - m) + 5 for _ in range(2): _lcs_match(max_dist) if f[n - 1, m - 1] > 0.8 * n: break max_dist *= 2 orig_to_chartok_index = [None] * n chartok_to_orig_index = [None] * m i, j = n - 1, m - 1 while i >= 0 and j >= 0: if (i, j) not in g: break if g[(i, j)] == 2: orig_to_chartok_index[i] = j chartok_to_orig_index[j] = i i, j = i - 1, j - 1 elif g[(i, j)] == 1: j = j - 1 else: i = i - 1 if (all(v is None for v in orig_to_chartok_index) or f[n - 1, m - 1] < 0.8 * n): logging.info("MISMATCH DETECTED!") continue tok_start_to_orig_index = [] tok_end_to_orig_index = [] for i in range(len(para_tokens)): start_chartok_pos = tok_start_to_chartok_index[i] end_chartok_pos = tok_end_to_chartok_index[i] start_orig_pos = _convert_index( chartok_to_orig_index, start_chartok_pos, n, is_start=True) end_orig_pos = _convert_index( chartok_to_orig_index, end_chartok_pos, n, is_start=False) tok_start_to_orig_index.append(start_orig_pos) tok_end_to_orig_index.append(end_orig_pos) if not is_training: tok_start_position = tok_end_position = None if is_training and example.is_impossible: tok_start_position = 0 tok_end_position = 0 if is_training and not example.is_impossible: start_position = example.start_position end_position = start_position + len(example.orig_answer_text) - 1 start_chartok_pos = _convert_index( orig_to_chartok_index, start_position, is_start=True) tok_start_position = chartok_to_tok_index[start_chartok_pos] end_chartok_pos = _convert_index( orig_to_chartok_index, end_position, is_start=False) tok_end_position = chartok_to_tok_index[end_chartok_pos] assert tok_start_position <= tok_end_position def _piece_to_id(x): return tokenizer.sp_model.PieceToId(x) all_doc_tokens = list(map(_piece_to_id, para_tokens)) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. 
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_is_max_context = {} segment_ids = [] cur_tok_start_to_orig_index = [] cur_tok_end_to_orig_index = [] tokens.append(tokenizer.sp_model.PieceToId("[CLS]")) segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append(tokenizer.sp_model.PieceToId("[SEP]")) segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i cur_tok_start_to_orig_index.append( tok_start_to_orig_index[split_token_index]) cur_tok_end_to_orig_index.append( tok_end_to_orig_index[split_token_index]) is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append(tokenizer.sp_model.PieceToId("[SEP]")) segment_ids.append(1) paragraph_len = len(tokens) input_ids = tokens # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length span_is_impossible = example.is_impossible start_position = None end_position = None if is_training and not span_is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: # continue start_position = 0 end_position = 0 span_is_impossible = True else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and span_is_impossible: start_position = 0 end_position = 0 if example_index < 20: logging.info("*** Example ***") logging.info("unique_id: %s", (unique_id)) logging.info("example_index: %s", (example_index)) logging.info("doc_span_index: %s", (doc_span_index)) logging.info("tok_start_to_orig_index: %s", " ".join([str(x) for x in cur_tok_start_to_orig_index])) logging.info("tok_end_to_orig_index: %s", " ".join([str(x) for x in cur_tok_end_to_orig_index])) logging.info( "token_is_max_context: %s", " ".join( ["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()])) logging.info( "input_pieces: %s", " ".join([tokenizer.sp_model.IdToPiece(x) for x in tokens])) logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) if is_training and span_is_impossible: logging.info("impossible example span") if is_training and not span_is_impossible: pieces = [ tokenizer.sp_model.IdToPiece(token) for token in tokens[start_position:(end_position + 1)] ] answer_text = tokenizer.sp_model.DecodePieces(pieces) logging.info("start_position: %d", (start_position)) logging.info("end_position: %d", (end_position)) logging.info("answer: %s", (tokenization.printable_text(answer_text))) # With multi processing, the example_index is actually the index # within the current process therefore we use example_index=None # to avoid being used in the future. # The current code does not use example_index of training data. if is_training: feat_example_index = None else: feat_example_index = example_index feature = InputFeatures( unique_id=unique_id, example_index=feat_example_index, doc_span_index=doc_span_index, tok_start_to_orig_index=cur_tok_start_to_orig_index, tok_end_to_orig_index=cur_tok_end_to_orig_index, token_is_max_context=token_is_max_context, tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens], input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible) # Run callback if is_training: output_fn(feature) else: output_fn(feature, is_padding=False) unique_id += 1 if span_is_impossible: cnt_neg += 1 else: cnt_pos += 1 if not is_training and feature: assert batch_size num_padding = 0 num_examples = unique_id - base_id if unique_id % batch_size != 0: num_padding = batch_size - (num_examples % batch_size) dummy_feature = copy.deepcopy(feature) for _ in range(num_padding): dummy_feature.unique_id = unique_id # Run callback output_fn(feature, is_padding=True) unique_id += 1 logging.info("Total number of instances: %d = pos %d neg %d", cnt_pos + cnt_neg, cnt_pos, cnt_neg) return unique_id - base_id def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. 
# Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, version_2_with_negative=False, null_score_diff_threshold=0.0, verbose=False): """Write final predictions to the json file and log-odds of null if needed.""" del do_lower_case, verbose logging.info("Writing predictions to: %s", (output_prediction_file)) logging.info("Writing nbest to: %s", (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: doc_offset = feature.tokens.index("[SEP]") + 1 # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index - doc_offset >= len(feature.tok_start_to_orig_index): continue if end_index - doc_offset >= len(feature.tok_end_to_orig_index): continue # if start_index not in feature.tok_start_to_orig_index: # continue # if end_index not in feature.tok_end_to_orig_index: # continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index - doc_offset, end_index=end_index - doc_offset, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=-1, end_index=-1, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index >= 0: # this is a non-null prediction tok_start_to_orig_index = feature.tok_start_to_orig_index tok_end_to_orig_index = feature.tok_end_to_orig_index start_orig_pos = tok_start_to_orig_index[pred.start_index] end_orig_pos = tok_end_to_orig_index[pred.end_index] paragraph_text = example.paragraph_text final_text = paragraph_text[start_orig_pos:end_orig_pos + 1].strip() if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't inlude the empty option in the n-best, inlcude it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: assert best_non_null_entry is not None # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with tf.io.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.io.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs class FeatureWriter(object): """Writes InputFeature to TF example file.""" def __init__(self, filename, is_training): self.filename = filename self.is_training = is_training self.num_features = 0 self._writer = tf.io.TFRecordWriter(filename) def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature( int64_list=tf.train.Int64List(value=list(values))) return feature features = collections.OrderedDict() features["unique_ids"] = create_int_feature([feature.unique_id]) features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) if self.is_training: features["start_positions"] = create_int_feature([feature.start_position]) features["end_positions"] = create_int_feature([feature.end_position]) impossible = 0 if feature.is_impossible: impossible = 1 features["is_impossible"] = create_int_feature([impossible]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) self._writer.write(tf_example.SerializeToString()) def close(self): self._writer.close() def 
generate_tf_record_from_json_file(input_file_path, sp_model_file, output_path, max_seq_length=384, do_lower_case=True, max_query_length=64, doc_stride=128, version_2_with_negative=False): """Generates and saves training data into a tf record file.""" train_examples = read_squad_examples( input_file=input_file_path, is_training=True, version_2_with_negative=version_2_with_negative) tokenizer = tokenization.FullSentencePieceTokenizer( sp_model_file=sp_model_file) train_writer = FeatureWriter(filename=output_path, is_training=True) number_of_examples = convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=True, output_fn=train_writer.process_feature, do_lower_case=do_lower_case) train_writer.close() meta_data = { "task_type": "bert_squad", "train_data_size": number_of_examples, "max_seq_length": max_seq_length, "max_query_length": max_query_length, "doc_stride": doc_stride, "version_2_with_negative": version_2_with_negative, } return meta_data
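A small, self-contained toy run of the sliding-window logic implemented above (doc_stride, max_tokens_for_doc, and the "maximum context" rule). The two helpers below are simplified copies of the corresponding code in convert_examples_to_features and _check_is_max_context, and the token counts are invented purely for illustration.

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    """Split a document of num_tokens into overlapping spans, as above."""
    doc_spans = []
    start_offset = 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

def check_is_max_context(doc_spans, cur_span_index, position):
    """Does cur_span_index give this token its maximum (min of left/right) context?"""
    best_score, best_span_index = None, None
    for span_index, doc_span in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start or position > end:
            continue
        num_left = position - doc_span.start
        num_right = end - position
        score = min(num_left, num_right) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score, best_span_index = score, span_index
    return cur_span_index == best_span_index

spans = make_doc_spans(num_tokens=10, max_tokens_for_doc=6, doc_stride=3)
print(spans)  # [DocSpan(start=0, length=6), DocSpan(start=3, length=6), DocSpan(start=6, length=4)]
for pos in range(10):
    owner = [i for i in range(len(spans)) if check_is_max_context(spans, i, pos)]
    print(pos, owner)  # exactly one span "owns" each token position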
TensorFlow2/Recommendation/WideAndDeep/trainer/utils
utils
benchmark
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import dllogger import horovod.tensorflow as hvd import tensorflow as tf from horovod.tensorflow.mpi_ops import Sum class ThroughputCalculator: def __init__(self, args): self.args = args self.boundary = max(self.args.benchmark_warmup_steps, 1) self.step = 0 self.t0 = None self.start_batch_time = None with tf.device("/CPU:0"): self.samples = tf.Variable(0, trainable=False, dtype=tf.int64) def _init_benchmark(self): self.t0 = time.perf_counter() def on_epoch_end_log(self, step, shape): batch_time = time.perf_counter() - self.start_batch_time self.samples.assign_add(shape) workers = hvd.size() if not self.args.cpu else 1 samplesps = shape * workers / batch_time if self.args.cpu or hvd.rank() == 0: dllogger.log(data={"batch_samplesps": samplesps}, step=(1, step)) def on_benchmark_end_log(self, eval_benchmark=False): train_time = time.perf_counter() - self.t0 hvd.join() if not self.args.cpu: all_samples = hvd.allreduce(self.samples, op=Sum) else: all_samples = self.samples all_samples = all_samples.numpy() if self.args.cpu or hvd.rank() == 0: key = "train_throughput" if not eval_benchmark else "validation_throughput" throughput = all_samples / train_time dllogger.log(data={key: throughput}, step=tuple()) def __call__(self, shape, eval_benchmark=False): if self.args.benchmark: if self.step == self.boundary: self._init_benchmark() if self.step > self.boundary: self.on_epoch_end_log(self.step, shape) if self.args.benchmark_steps <= self.step: self.on_benchmark_end_log(eval_benchmark=eval_benchmark) exit(0) self.step += 1 self.start_batch_time = time.perf_counter()
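A single-process sketch of the arithmetic ThroughputCalculator performs: per-step samples/sec after the warmup boundary, plus an aggregate throughput over the benchmark window. Horovod reduction and dllogger output are stripped out, so this is an illustration of the formulas rather than a drop-in replacement.

import time

class SimpleThroughput:
    """Per-step and aggregate samples/sec, mirroring the warmup logic above."""

    def __init__(self, warmup_steps=1):
        self.boundary = max(warmup_steps, 1)
        self.step = 0
        self.t0 = None
        self.samples = 0
        self.start_batch_time = None

    def __call__(self, batch_size):
        now = time.perf_counter()
        if self.step == self.boundary:
            self.t0 = now  # the benchmark window starts once warmup is done
        if self.step > self.boundary:
            batch_time = now - self.start_batch_time
            self.samples += batch_size
            print(f"step {self.step}: {batch_size / batch_time:.1f} samples/s")
        self.step += 1
        self.start_batch_time = now

    def summary(self):
        if self.t0 is None:
            return  # benchmark window never started
        elapsed = time.perf_counter() - self.t0
        print(f"benchmark throughput: {self.samples / elapsed:.1f} samples/s")

# Usage: tc = SimpleThroughput(warmup_steps=10); call tc(batch_size) once per step; tc.summary() at the end.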
TensorFlow/Segmentation/UNet_Industrial/notebooks
notebooks
TensorFlow_UNet_Industrial_TF_train_and_inference
#!/usr/bin/env python # coding: utf-8 # <a href="https://colab.research.google.com/github/vinhngx/DeepLearningExamples/blob/vinhn_unet_industrial_demo/TensorFlow/Segmentation/UNet_Industrial/notebooks/TensorFlow_UNet_Industrial_TF_train_and_inference.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # In[1]: # Copyright 2019 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # # # UNet Industrial Training and Inference Demo # ## Overview # # # This U-Net model is adapted from the original version of the [U-Net model](https://arxiv.org/abs/1505.04597) which is # a convolutional auto-encoder for 2D image segmentation. U-Net was first introduced by # Olaf Ronneberger, Philip Fischer, and Thomas Brox in the paper: # [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597). # # This work proposes a modified version of U-Net, called `TinyUNet` which performs efficiently and with very high accuracy # on the industrial anomaly dataset [DAGM2007](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html). # *TinyUNet*, like the original *U-Net* is composed of two parts: # - an encoding sub-network (left-side) # - a decoding sub-network (right-side). # # It repeatedly applies 3 downsampling blocks composed of two 2D convolutions followed by a 2D max pooling # layer in the encoding sub-network. In the decoding sub-network, 3 upsampling blocks are composed of a upsample2D # layer followed by a 2D convolution, a concatenation operation with the residual connection and two 2D convolutions. # # `TinyUNet` has been introduced to reduce the model capacity which was leading to a high degree of over-fitting on a # small dataset like DAGM2007. The complete architecture is presented in the figure below: # # ![UnetModel](https://github.com/vinhngx/DeepLearningExamples/blob/vinhn_unet_industrial_demo/TensorFlow/Segmentation/UNet_Industrial/images/unet.png?raw=1) # # # # ### Learning objectives # # This notebook demonstrates the steps for training a UNet model. We then employ the trained model to make inference on new images. # # ## Content # 1. [Requirements](#1) # 1. [Data download and preprocessing](#2) # 1. [Training](#3) # 1. [Testing trained model](#4) # # <a id="1"></a> # ## 1. Requirements # # # ### 1.1 Docker container # The most convenient way to make use of the NVIDIA Tensorflow UNet model is via a docker container, which provides a self-contained, isolated and re-producible environment for all experiments. Refer to the [Quick Start Guide section](https://github.com/vinhngx/DeepLearningExamples/tree/vinhn_unet_industrial_demo/TensorFlow/Segmentation/UNet_Industrial#requirements) of the Readme documentation for a comprehensive guide. 
We briefly summarize the steps here. # # First, clone the repository: # # ``` # git clone https://github.com/NVIDIA/DeepLearningExamples.git # cd DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial # ``` # # Next, build the NVIDIA UNet_Industrial container: # # ``` # docker build . --rm -t unet_industrial:latest # ``` # # Then launch the container with: # # ``` # nvidia-docker run -it --rm \ # --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 \ # -v /path/to/dataset:/data/dagm2007/ \ # -v /path/to/results:/results \ # unet_industrial:latest # ``` # where `/path/to/dataset` is the path on the host machine where the data was/is to be downloaded. More on data set preparation in the next section. `/path/to/results` is wher the trained model will be stored. # # Within the docker interactive bash session, start Jupyter with # # ``` # jupyter notebook --ip 0.0.0.0 --port 8888 # ``` # # Then open the Jupyter GUI interface on your host machine at http://localhost:8888. Within the container, this notebook itself is located at `/workspace/unet_industrial/notebooks`. # # ### 1.2 Hardware # This notebook can be executed on any CUDA-enabled NVIDIA GPU, although for efficient mixed precision training, a [Tensor Core NVIDIA GPU](https://www.nvidia.com/en-us/data-center/tensorcore/) is desired (Volta, Turing or newer architectures). # In[2]: get_ipython().system('nvidia-smi') # <a id="2"></a> # ## 2. Data download and preprocessing # # We will first download some data, in particular, the [Weakly Supervised Learning for Industrial Optical Inspection (DAGM 2007)](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html) dataset. # # > The competition is inspired by problems from industrial image processing. In order to satisfy their customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly. # > # > The competitors have to design a stand-alone algorithm which is able to detect miscellaneous defects on various background textures. # > # > The particular challenge of this contest is that the algorithm must learn, without human intervention, to discern defects automatically from a weakly labeled (i.e., labels are not exact to the pixel level) training set, the exact characteristics of which are unknown at development time. During the competition, the programs have to be trained on new data without any human guidance. # # **Source:** https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html # # # **Important Information**: The data download script below will download the *public* DAGM 2007 data set, while the *private* DAGM 2007 requires an account to be downloaded. The script will invite you to download them manually and put them in the correct directory for subsequent pre-processing. We will employ the *private* DAGM data set of 10 classes to train UNnet models. # In[4]: get_ipython().system(' ../download_and_preprocess_dagm2007.sh ./data') # Within the docker container, the final data directory should look like: # # ``` # ./data # raw_images # public # Class1 # Class2 # Class3 # Class4 # Class5 # Class6 # Class1_def # Class2_def # Class3_def # Class4_def # Class5_def # Class6_def # private # Class1 # Class10 # Class2 # Class3 # Class4 # Class5 # Class6 # Class7 # Class8 # Class9 # zip_files # ``` # <a id="3"></a> # ## 3. 
Training # The repository provides several training recipes for 1, 4 and 8 GPUs with FP32 and automatic mixed precision in `./scripts`. # ### 3.1 Training with 1 GPU # # #### Training with full precision # Training on 1 GPU with FP32 uses the following syntax: # # ``` # ./UNet_FP32_1GPU.sh <path to result repository> <path to dataset> <DAGM2007 classID (1-10)> # ``` # # For example: # ``` # ../scripts/UNet_FP32_1GPU.sh ./results/1GPU-FP32 /CDR/DAGM10 1 # ``` # # Note that the shell script internally invokes the main Python training script. # In[ ]: RESULT_DIR="./results/1GPU-FP32" DATA_DIR="/CDR/DAGM10" CLASS="1" get_ipython().run_line_magic('run', "../main.py --unet_variant='tinyUNet' --activation_fn='relu' --exec_mode='train_and_evaluate' --iter_unit='batch' --num_iter=2500 --batch_size=16 --warmup_step=10 --results_dir={RESULT_DIR} --data_dir={DATA_DIR} --dataset_name='DAGM2007' --dataset_classID={CLASS} --data_format='NCHW' --use_auto_loss_scaling --noamp --noxla --learning_rate=1e-4 --learning_rate_decay_factor=0.8 --learning_rate_decay_steps=500 --rmsprop_decay=0.9 --rmsprop_momentum=0.8 --loss_fn_name='adaptive_loss' --weight_decay=1e-5 --weight_init_method='he_uniform' --augment_data --display_every=250 --debug_verbosity=0") # #### Training with mixed-precision # We now launch the training process using mixed precision. Training on 1 GPU with automatic mixed precision (AMP) uses the following syntax: # # ``` # ./UNet_AMP_1GPU.sh <path to result repository> <path to dataset> <DAGM2007 classID (1-10)> # ``` # # For example: # ``` # ../scripts/UNet_AMP_1GPU.sh ./results/1GPU-AMP /CDR/DAGM10 1 # ``` # # Note that the shell script internally invokes the main Python training script. # In[ ]: RESULT_DIR="./results/1GPU-FP16" DATA_DIR="/CDR/DAGM10" CLASS="1" get_ipython().run_line_magic('run', "../main.py --unet_variant='tinyUNet' --activation_fn='relu' --exec_mode='train_and_evaluate' --iter_unit='batch' --num_iter=2500 --batch_size=16 --warmup_step=10 --results_dir={RESULT_DIR} --data_dir={DATA_DIR} --dataset_name='DAGM2007' --dataset_classID={CLASS} --data_format='NCHW' --use_auto_loss_scaling --amp --noxla --learning_rate=1e-4 --learning_rate_decay_factor=0.8 --learning_rate_decay_steps=500 --rmsprop_decay=0.9 --rmsprop_momentum=0.8 --loss_fn_name='adaptive_loss' --weight_decay=1e-5 --weight_init_method='he_uniform' --augment_data --display_every=250 --debug_verbosity=0") # ### 3.2. Training with 8 GPUs # # We provide training recipes for 8 GPUs using FP32 or automatic mixed precision (AMP). # # #### Training with full precision # # In[ ]: get_ipython().system('../scripts/UNet_FP32_8GPU.sh ./results/8GPU-FP32 /CDR/DAGM10 1') # #### Training with automatic mixed precision (AMP) # In[ ]: get_ipython().system('../scripts/UNet_AMP_8GPU.sh ./results/8GPU-AMP /CDR/DAGM10 1') # <a id="4"></a> # ## 4. Testing trained model # # After model training has completed, we can test the trained model against the DAGM 2007 public data set which wasn't used for training. First, we load some required libraries and define some helper functions to load images. # In[11]: import sys sys.path.insert(0,'..') from model.unet import UNet_v1 import numpy as np import tensorflow as tf get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt import matplotlib.image as mpimg # In[12]: img = mpimg.imread('./data/raw_images/public/Class1_def/1.png') plt.figure(figsize = (10,10)) plt.imshow(img, cmap='gray') # As we can see in this figure, there exists a defective area in the top left corner.
We will now load the model and carry out inference on the normalized test image. # In[13]: # Image preprocessing img = np.expand_dims(img, axis=2) img = np.expand_dims(img, axis=0) img = (img-0.5)/0.5 # Next, we start a TF session, load the trained UNet model and carry out inference on the test image. # In[16]: get_ipython().system('ls ./results/1GPU-FP16/checkpoints') # In[ ]: config = tf.ConfigProto() config.gpu_options.allow_growth = True config.allow_soft_placement = True graph = tf.Graph() with graph.as_default(): with tf.Session(config=config) as sess: network = UNet_v1( model_name="UNet_v1", input_format='NHWC', compute_format='NHWC', n_output_channels=1, unet_variant='tinyUNet', weight_init_method='he_uniform', activation_fn='relu' ) tf_input = tf.placeholder(tf.float32, [None, 512, 512, 1], name='input') outputs, logits = network.build_model(tf_input) saver = tf.train.Saver() # Restore variables from disk. saver.restore(sess, "./results/1GPU-FP16/checkpoints/model.ckpt-2500") output = sess.run([outputs, logits], feed_dict={tf_input: img}) # In[18]: # Print out model predicted mask plt.figure(figsize = (10,10)) plt.imshow(np.squeeze(output[0]), cmap='gray') # As expected, the model points out the correct defective area in this image. Please feel free to try out other defective images within `./data/raw_images/public/Class1_def/` # In[20]: get_ipython().system('ls ./data/raw_images/public/Class1_def/') # # Conclusion # # In this notebook, we have walked through the complete process of preparing the container and data required for training the UNet-Industrial models. We have also investigated various training options with FP32 and automatic mixed precision, trained and tested UNet models with new test images. # # ## What's next # Now it's time to try the UNet model on your own data. Observe the performance impact of mixed precision training while comparing the final accuracy of the models trained with FP32 and mixed precision. # # In[ ]:
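A small convenience helper (our addition, not part of the notebook) that applies the same preprocessing shown above, shifting and scaling pixel values to [-1, 1] and reshaping to NHWC with a batch dimension, so any of the DAGM test images in ./data/raw_images/public/Class1_def/ can be fed to the restored model's tf_input placeholder.

import numpy as np
import matplotlib.image as mpimg

def load_dagm_image(path):
    """Read a grayscale PNG and return a (1, H, W, 1) array normalized to [-1, 1]."""
    img = mpimg.imread(path)           # DAGM PNGs load as float32 in [0, 1]
    img = np.expand_dims(img, axis=2)  # H x W      -> H x W x 1
    img = np.expand_dims(img, axis=0)  #            -> 1 x H x W x 1
    return (img - 0.5) / 0.5

# Example: batch = load_dagm_image('./data/raw_images/public/Class1_def/1.png')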
PyTorch/Classification/ConvNets/efficientnet/inference/AMP
AMP
DGXA100_efficientnet-widese-b4_AMP
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode benchmark_inference --platform DGXA100 /imagenet -b 256 --workspace ${1:-./} --raport-file raport_256.json
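The nine invocations above differ only in the batch size and the report file name. As a hedged sketch, the same sweep can be generated programmatically; the flags are copied from the commands above, and the workspace is assumed to be the current directory (the script's ${1:-./} default).

import subprocess

for bs in [1, 2, 4, 8, 16, 32, 64, 128, 256]:
    subprocess.run(
        [
            "python", "./multiproc.py", "--nproc_per_node", "8", "./launch.py",
            "--model", "efficientnet-widese-b4", "--precision", "AMP",
            "--mode", "benchmark_inference", "--platform", "DGXA100", "/imagenet",
            "-b", str(bs), "--workspace", "./",
            "--raport-file", f"raport_{bs}.json",
        ],
        check=True,  # stop the sweep if any batch size fails
    )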
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssdlite_mobilenet_v1_coco
# SSDLite with Mobilenet v1 configuration for MSCOCO Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { ssd { num_classes: 90 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 3 use_depthwise: true box_code_size: 4 apply_sigmoid_to_scores: false conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } } feature_extractor { type: 'ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 1.0 use_depthwise: true conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } loss { classification_loss { weighted_sigmoid { } } localization_loss { weighted_smooth_l1 { } } hard_example_miner { num_hard_examples: 3000 iou_threshold: 0.99 loss_type: CLASSIFICATION max_negatives_per_positive: 3 min_negatives_per_image: 0 } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { batch_size: 24 optimizer { rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { num_examples: 8000 # Note: The below line limits the evaluation process to 10 evaluations. # Remove the below line to evaluate indefinitely. 
max_evals: 10 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
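The ssd_anchor_generator block above only specifies min_scale and max_scale; the per-layer anchor scales follow by linear interpolation across the six feature maps, the standard SSD scheme. The quick check below shows the scales implied by min_scale=0.2 and max_scale=0.95. Note that the TF Object Detection anchor generator may additionally special-case the lowest-resolution layer, so treat this as an approximation, not the exact anchors it emits.

num_layers, min_scale, max_scale = 6, 0.2, 0.95
scales = [min_scale + (max_scale - min_scale) * k / (num_layers - 1)
          for k in range(num_layers)]
print([round(s, 2) for s in scales])  # [0.2, 0.35, 0.5, 0.65, 0.8, 0.95]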
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model_dataset
model_dataset
tft_traffic
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. model: config: n_head: 4 hidden_size: 128 dropout: 0.3 attn_dropout: 0 trainer: config: batch_size: 1024 num_epochs: 10 gradient_norm: 1.0 optimizer: lr: .001
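In the TSPP conf tree this override is normally composed by Hydra together with the model, trainer and dataset groups. As a hedged illustration only, the file can also be inspected directly with OmegaConf; the path below assumes the conf/model_dataset layout shown above.

from omegaconf import OmegaConf

cfg = OmegaConf.load("conf/model_dataset/tft_traffic.yaml")
print(cfg.model.config.hidden_size)    # 128
print(cfg.trainer.config.batch_size)   # 1024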
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2PrenetPlugin
taco2PrenetPlugin
taco2PrenetLayerPlugin
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2PrenetLayerPlugin.h" #include "taco2Utils.h" #include <cassert> #include <cstdlib> #include <cstring> #include <cuda_runtime.h> // cudaError_t #include <iostream> #include <sstream> #include <stdexcept> #include <string> using namespace nvinfer1; namespace nvinfer1 { namespace plugin { using value_type = Taco2PrenetLayerPlugin::value_type; /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const char* const PLUGIN_NAME = "Taco2Prenet"; constexpr const char* const PLUGIN_VERSION = "0.1.0"; constexpr const int NUM_INPUTS = 2; } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { const void* offset(const void* ptr, const size_t offset) { return reinterpret_cast<const void*>(static_cast<const uint8_t*>(ptr) + offset); } } // namespace /****************************************************************************** * STATIC METHODS ************************************************************* *****************************************************************************/ const char* Taco2PrenetLayerPlugin::getName() { return PLUGIN_NAME; } const char* Taco2PrenetLayerPlugin::getVersion() { return PLUGIN_VERSION; } Taco2PrenetLayerPlugin Taco2PrenetLayerPlugin::deserialize(const void* const data, const size_t length) { if (length < sizeof(int32_t) * 2) { throw std::runtime_error("Invalid serialized size: " + std::to_string(length)); } const int inputLength = static_cast<const int32_t*>(data)[0]; const int numDimension = static_cast<const int32_t*>(data)[1]; const size_t reqSize = 2 * sizeof(int32_t) + sizeof(value_type) * ((inputLength + numDimension) * numDimension); if 
(reqSize != length) { throw std::runtime_error( "Invalid serialized size: " + std::to_string(length) + " / " + std::to_string(reqSize)); } const Weights weights1{DataType::kFLOAT, offset(data, 2 * sizeof(int32_t)), numDimension * inputLength}; const Weights weights2{DataType::kFLOAT, offset(weights1.values, sizeof(value_type) * numDimension * inputLength), numDimension * numDimension}; Taco2PrenetLayerPlugin layer(weights1, weights2, inputLength, numDimension); return layer; } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2PrenetLayerPlugin::Taco2PrenetLayerPlugin( const Weights& weights1, const Weights& weights2, const int inputLength, const int numDimension) : mInputLength(inputLength) , mNumDimension(numDimension) , mWeights1Host(taco2::Taco2Utils::toFloatVector(weights1)) , mWeights2Host(taco2::Taco2Utils::toFloatVector(weights2)) , mKernel() , mNamespace() { if (mNumDimension <= 0) { throw std::runtime_error("Invalid Taco2Prenet dimension: " + std::to_string(mNumDimension)); } } Taco2PrenetLayerPlugin::Taco2PrenetLayerPlugin(Taco2PrenetLayerPlugin&& other) : mInputLength(other.mInputLength) , mNumDimension(other.mNumDimension) , mWeights1Host(std::move(other.mWeights1Host)) , mWeights2Host(std::move(other.mWeights2Host)) , mKernel(std::move(other.mKernel)) , mNamespace(std::move(other.mNamespace)) { other.mInputLength = 0; other.mNumDimension = 0; } Taco2PrenetLayerPlugin::~Taco2PrenetLayerPlugin() { destroy(); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ DataType Taco2PrenetLayerPlugin::getOutputDataType( const int /* index */, const DataType* const /* inputTypes */, const int /* nbInputs */) const { return DataType::kFLOAT; } const char* Taco2PrenetLayerPlugin::getPluginType() const { return getName(); } const char* Taco2PrenetLayerPlugin::getPluginVersion() const { return getVersion(); } int Taco2PrenetLayerPlugin::getNbOutputs() const { return 1; } DimsExprs Taco2PrenetLayerPlugin::getOutputDimensions( const int index, const DimsExprs* const inputs, const int nbInputs, IExprBuilder& expBuilder) { if (index >= getNbOutputs()) { throw std::runtime_error("Only has one output."); } if (nbInputs != NUM_INPUTS) { throw std::runtime_error( "Can only handle " + std::to_string(NUM_INPUTS) + " input tensors: " + std::to_string(nbInputs)); } return DimsExprs{3, {inputs[0].d[0], expBuilder.constant(1), expBuilder.constant(mNumDimension)}}; } bool Taco2PrenetLayerPlugin::supportsFormatCombination( const int pos, const PluginTensorDesc* inOut, const int /* nbInputs */, const int /* nbOutputs */) { return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT; } void Taco2PrenetLayerPlugin::configurePlugin( const DynamicPluginTensorDesc* in, const int nbInputs, const DynamicPluginTensorDesc* out, const int nbOutputs) { if (nbInputs != NUM_INPUTS) { throw std::runtime_error( "Can only handle " + std::to_string(NUM_INPUTS) + " input tensors: " + std::to_string(nbInputs)); } for (int i = 0; i < nbInputs; ++i) { if (in[i].desc.type != DataType::kFLOAT) { throw std::runtime_error("Only FLOAT supported as input " + std::to_string(i) + " : " + 
std::to_string(static_cast<int>(in[i].desc.type))); } } // assert dimensions { bool foundDim = false; const Dims dims = in[0].desc.dims; for (int d = 1; d < dims.nbDims; ++d) { if (dims.d[d] != 1) { if (foundDim || dims.d[d] < mInputLength) { throw std::runtime_error("Taco2Prenet input must be 1* x inputLength (" + std::to_string(mInputLength) + ") : " + taco2::Taco2Utils::dimsToString(dims)); } foundDim = true; } } if (!foundDim) { throw std::runtime_error("Taco2Prenet input must be 1* x inputLength (" + std::to_string(mInputLength) + ") x 1* : " + taco2::Taco2Utils::dimsToString(dims)); } } { bool foundDim = false; const Dims dims = in[1].desc.dims; for (int d = 1; d < dims.nbDims; ++d) { if (dims.d[d] != 1) { if (foundDim || dims.d[d] != mNumDimension) { throw std::runtime_error("Taco2Prenet input must be 1* x numDimension (" + std::to_string(mNumDimension) + ") : " + taco2::Taco2Utils::dimsToString(dims)); } foundDim = true; } } if (!foundDim) { throw std::runtime_error("Query input must be 1* x numDimension (" + std::to_string(mNumDimension) + ") x 1* : " + taco2::Taco2Utils::dimsToString(dims)); } } if (nbOutputs != 1) { throw std::runtime_error("Only one output is implemented: " + std::to_string(nbOutputs)); } for (int i = 0; i < nbOutputs; ++i) { if (out[i].desc.type != DataType::kFLOAT) { throw std::runtime_error("Only FLOAT supported as output: " + std::to_string(i) + " : " + std::to_string(static_cast<int>(out[i].desc.type))); } } } int Taco2PrenetLayerPlugin::initialize() { try { mKernel.reset(new Taco2PrenetKernel(mWeights1Host, mWeights2Host, mInputLength, mNumDimension)); } catch (const std::exception& e) { std::cerr << "Taco2PrenetLayerPlugin initialization failed: " << e.what() << std::endl; return 1; } return 0; } void Taco2PrenetLayerPlugin::terminate() { mKernel.reset(); } size_t Taco2PrenetLayerPlugin::getWorkspaceSize(const PluginTensorDesc* in, const int /* nbInputs */, const PluginTensorDesc* /* out */, const int /* nbOutputs */) const { return in[0].dims.d[0] * mNumDimension * sizeof(value_type); } int Taco2PrenetLayerPlugin::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* /* outputDesc */, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { const int batchSize = inputDesc[0].dims.d[0]; if (batchSize != 1) { // we only support batch size of 1 right now std::cerr << "Taco2PrenetLayerPlugin plugin does not support batch size other than 1: got " << batchSize << std::endl; std::cerr << "Recompile without plugins to use a larger batch size." << std::endl; return 1; } else if (!mKernel) { std::cerr << "Taco2PrenetLayerPlugin is not initialized properly." 
<< std::endl; return 1; } // name inputs and outputs const value_type* const inputDevice = static_cast<const value_type*>(inputs[0]); const value_type* const dropoutDevice = static_cast<const value_type*>(inputs[1]); value_type* const outputDevice = static_cast<value_type*>(outputs[0]); mKernel->execute(inputDevice, dropoutDevice, outputDevice, static_cast<float*>(workspace), stream); return 0; } size_t Taco2PrenetLayerPlugin::getSerializationSize() const { return NUM_INPUTS * sizeof(int32_t) + sizeof(value_type) * (mNumDimension + mInputLength) * mNumDimension; } void Taco2PrenetLayerPlugin::serialize(void* const buffer) const { static_cast<int32_t*>(buffer)[0] = mInputLength; static_cast<int32_t*>(buffer)[1] = mNumDimension; float* const weights1 = reinterpret_cast<float*>(static_cast<int32_t*>(buffer) + 2); float* const weights2 = weights1 + (mInputLength * mNumDimension); memcpy(weights1, mWeights1Host.data(), sizeof(value_type) * mWeights1Host.size()); memcpy(weights2, mWeights2Host.data(), sizeof(value_type) * mWeights2Host.size()); } void Taco2PrenetLayerPlugin::destroy() { terminate(); } IPluginV2DynamicExt* Taco2PrenetLayerPlugin::clone() const { // call constructor which copies data Taco2PrenetLayerPlugin clone( Weights{DataType::kFLOAT, mWeights1Host.data(), static_cast<int64_t>(mWeights1Host.size())}, Weights{DataType::kFLOAT, mWeights2Host.data(), static_cast<int64_t>(mWeights2Host.size())}, mInputLength, mNumDimension); if (mKernel) { // initialize the clone too clone.initialize(); } // move it to the heap last to avoid exceptions causing memory leaks return new Taco2PrenetLayerPlugin(std::move(clone)); } void Taco2PrenetLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mNamespace = pluginNamespace; } const char* Taco2PrenetLayerPlugin::getPluginNamespace() const { return mNamespace.c_str(); } } // namespace plugin } // namespace nvinfer1
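For reference, the serialized blob written by serialize() above is two int32 header fields (inputLength, numDimension) followed by the two float32 weight matrices. A minimal Python sketch of the expected byte count, mirroring getSerializationSize(); the concrete sizes in the example are hypothetical:

# Illustrative check of the Taco2Prenet serialization layout described above.
# Assumed layout: [int32 inputLength][int32 numDimension]
#                 [float32 W1 (numDimension x inputLength)][float32 W2 (numDimension x numDimension)]
def taco2_prenet_serialized_size(input_length: int, num_dimension: int) -> int:
    int32_bytes, float32_bytes = 4, 4
    header = 2 * int32_bytes
    weights = float32_bytes * (input_length + num_dimension) * num_dimension
    return header + weights

# Hypothetical sizes (e.g. an 80-channel input and a 256-unit prenet):
print(taco2_prenet_serialized_size(80, 256))  # 2*4 + 4*(80+256)*256 = 344072 bytes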
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
model_tpu_main
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Creates and runs `Estimator` for object detection model on TPUs. This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes. """ # pylint: enable=line-too-long from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import flags import tensorflow as tf from object_detection import model_hparams from object_detection import model_lib tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs') # Cloud TPU Cluster Resolvers flags.DEFINE_string( 'gcp_project', default=None, help='Project name for the Cloud TPU-enabled project. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_zone', default=None, help='GCE zone where the Cloud TPU is located in. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_name', default=None, help='Name of the Cloud TPU for Cluster Resolvers.') flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).') flags.DEFINE_integer('iterations_per_loop', 100, 'Number of iterations per TPU training loop.') # For mode=train_and_eval, evaluation occurs after training is finished. # Note: independently of steps_per_checkpoint, estimator will save the most # recent checkpoint every 10 minutes by default for train_and_eval flags.DEFINE_string('mode', 'train', 'Mode to run: train, eval') flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If ' 'this is not provided, batch size is read from training ' 'config.') flags.DEFINE_string( 'hparams_overrides', None, 'Comma-separated list of ' 'hyperparameters to override defaults.') flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.') flags.DEFINE_boolean('eval_training_data', False, 'If training data should be evaluated for this job.') flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of ' 'every n eval input examples, where n is provided.') flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample ' 'one of every n train input examples for evaluation, ' 'where n is provided. 
This is only used if ' '`eval_training_data` is True.') flags.DEFINE_string( 'model_dir', None, 'Path to output model directory ' 'where event and checkpoint files will be written.') flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config ' 'file.') FLAGS = tf.flags.FLAGS def main(unused_argv): flags.mark_flag_as_required('model_dir') flags.mark_flag_as_required('pipeline_config_path') tpu_cluster_resolver = ( tf.contrib.cluster_resolver.TPUClusterResolver( tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)) tpu_grpc_url = tpu_cluster_resolver.get_master() config = tf.contrib.tpu.RunConfig( master=tpu_grpc_url, evaluation_master=tpu_grpc_url, model_dir=FLAGS.model_dir, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_shards)) kwargs = {} if FLAGS.train_batch_size: kwargs['batch_size'] = FLAGS.train_batch_size train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config=config, hparams=model_hparams.create_hparams(FLAGS.hparams_overrides), pipeline_config_path=FLAGS.pipeline_config_path, train_steps=FLAGS.num_train_steps, sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples, sample_1_of_n_eval_on_train_examples=( FLAGS.sample_1_of_n_eval_on_train_examples), use_tpu_estimator=True, use_tpu=FLAGS.use_tpu, num_shards=FLAGS.num_shards, save_final_config=FLAGS.mode == 'train', **kwargs) estimator = train_and_eval_dict['estimator'] train_input_fn = train_and_eval_dict['train_input_fn'] eval_input_fns = train_and_eval_dict['eval_input_fns'] eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] train_steps = train_and_eval_dict['train_steps'] if FLAGS.mode == 'train': estimator.train(input_fn=train_input_fn, max_steps=train_steps) # Continuously evaluating. if FLAGS.mode == 'eval': if FLAGS.eval_training_data: name = 'training_data' input_fn = eval_on_train_input_fn else: name = 'validation_data' # Currently only a single eval input is allowed. input_fn = eval_input_fns[0] model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps, name) if __name__ == '__main__': tf.app.run()
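A hedged launch sketch for the TPU training entry point above; the script location, TPU name, bucket, and config paths are placeholders, while the flag names come from the flag definitions in this file:

# Hypothetical invocation of model_tpu_main.py; adjust paths and TPU name for your setup.
import subprocess

subprocess.run([
    "python", "object_detection/model_tpu_main.py",   # assumed location of this script
    "--tpu_name", "my-tpu",                           # placeholder Cloud TPU name
    "--model_dir", "gs://my-bucket/ssd_model",        # placeholder output directory
    "--pipeline_config_path", "configs/ssd.config",   # placeholder pipeline config
    "--mode", "train",
    "--num_train_steps", "25000",
], check=True)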
PaddlePaddle/LanguageModeling/BERT/data
data
bertPrep
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import pprint import subprocess import BookscorpusTextFormatting import Downloader import TextSharding import WikicorpusTextFormatting def main(args): working_dir = os.environ['BERT_PREP_WORKING_DIR'] print('Working Directory:', working_dir) print('Action:', args.action) print('Dataset Name:', args.dataset) if args.input_files: args.input_files = args.input_files.split(',') hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \ + "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \ + "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor) directory_structure = { 'download': working_dir + '/download', # Downloaded and decompressed 'extracted': working_dir + '/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor) 'formatted': working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same 'sharded': working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set), 'tfrecord': working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix, 'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix } print('\nDirectory Structure:') pp = pprint.PrettyPrinter(indent=2) pp.pprint(directory_structure) print('') if args.action == 'download': if not os.path.exists(directory_structure['download']): os.makedirs(directory_structure['download']) downloader = Downloader.Downloader(args.dataset, directory_structure['download']) downloader.download() elif args.action == 'text_formatting': assert args.dataset != 'squad', 'Cannot perform text_formatting on squad or pretrained weights' if not os.path.exists(directory_structure['extracted']): os.makedirs(directory_structure['extracted']) if not os.path.exists(directory_structure['formatted']): os.makedirs(directory_structure['formatted']) if args.dataset == 'bookscorpus': books_path = directory_structure['download'] + '/bookscorpus' #books_path = directory_structure['download'] output_filename = directory_structure[ 'formatted'] + '/bookscorpus_one_book_per_line.txt' books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting( books_path, output_filename, recursive=True) books_formatter.merge() elif args.dataset == 'wikicorpus_en': if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure[ 'download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str( args.n_processes) + ' -o ' + directory_structure[ 'extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) # wikiextractor_process = subprocess.run(wikiextractor_command, 
subprocess.run(wikiextractor_command, shell=True, check=True) #wikiextractor_process.communicate() wiki_path = directory_structure['extracted'] + '/wikicorpus_en' output_filename = directory_structure[ 'formatted'] + '/wikicorpus_en_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting( wiki_path, output_filename, recursive=True) wiki_formatter.merge() elif args.dataset == 'wikicorpus_zh': assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.' if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure[ 'download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str( args.n_processes) + ' -o ' + directory_structure[ 'extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) # wikiextractor_process = subprocess.run(wikiextractor_command, subprocess.run(wikiextractor_command, shell=True, check=True) #wikiextractor_process.communicate() wiki_path = directory_structure['extracted'] + '/wikicorpus_zh' output_filename = directory_structure[ 'formatted'] + '/wikicorpus_zh_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting( wiki_path, output_filename, recursive=True) wiki_formatter.merge() assert os.stat( output_filename ).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.' elif args.action == 'sharding': # Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces) if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset: if args.input_files is None: if args.dataset == 'bookscorpus': args.input_files = [ directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt' ] elif args.dataset == 'wikicorpus_en': args.input_files = [ directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt' ] elif args.dataset == 'wikicorpus_zh': args.input_files = [ directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt' ] elif args.dataset == 'books_wiki_en_corpus': args.input_files = [ directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt' ] output_file_prefix = directory_structure[ 'sharded'] + '/' + args.dataset + '/' + args.dataset if not os.path.exists(directory_structure['sharded']): os.makedirs(directory_structure['sharded']) if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset): os.makedirs(directory_structure['sharded'] + '/' + args.dataset) # Segmentation is here because all datasets look the same in one article/book/whatever per line format, and # it seemed unnecessarily complicated to add an additional preprocessing step to call just for this. 
# Different languages (e.g., Chinese simplified/traditional) may require translation and # other packages to be called from here -- just add a conditional branch for those extra steps segmenter = TextSharding.NLTKSegmenter() sharding = TextSharding.Sharding( args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set) sharding.load_articles() sharding.segment_articles_into_sentences(segmenter) sharding.distribute_articles_over_shards() sharding.write_shards_to_disk() else: assert False, 'Unsupported dataset for sharding' elif args.action == 'create_tfrecord_files': assert False, 'TFrecord creation not supported in this PyTorch model example release.' \ '' if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset) def create_record_worker(filename_prefix, shard_id, output_format='tfrecord'): bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure[ 'sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str( shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure[ 'tfrecord'] + '/' + args.dataset + '/' + filename_prefix + '_' + str( shard_id) + '.' + output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + str( args.max_seq_length) bert_preprocessing_command += ' --max_predictions_per_seq=' + str( args.max_predictions_per_seq) bert_preprocessing_command += ' --masked_lm_prob=' + str( args.masked_lm_prob) bert_preprocessing_command += ' --random_seed=' + str( args.random_seed) bert_preprocessing_command += ' --dupe_factor=' + str( args.dupe_factor) bert_preprocessing_process = subprocess.Popen( bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() return last_process output_file_prefix = args.dataset for i in range(args.n_training_shards): last_process = create_record_worker( output_file_prefix + '_training', i) last_process.wait() for i in range(args.n_test_shards): last_process = create_record_worker(output_file_prefix + '_test', i) last_process.wait() elif args.action == 'create_hdf5_files': last_process = None if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset): os.makedirs(directory_structure['hdf5'] + "/" + args.dataset) def create_record_worker(filename_prefix, shard_id, output_format='hdf5'): bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure[ 'sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str( shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure[ 'hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str( shard_id) + '.' 
+ output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + str( args.max_seq_length) bert_preprocessing_command += ' --max_predictions_per_seq=' + str( args.max_predictions_per_seq) bert_preprocessing_command += ' --masked_lm_prob=' + str( args.masked_lm_prob) bert_preprocessing_command += ' --random_seed=' + str( args.random_seed) bert_preprocessing_command += ' --dupe_factor=' + str( args.dupe_factor) bert_preprocessing_process = subprocess.Popen( bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() return last_process output_file_prefix = args.dataset for i in range(args.n_training_shards): last_process = create_record_worker( output_file_prefix + '_training', i) last_process.wait() for i in range(args.n_test_shards): last_process = create_record_worker(output_file_prefix + '_test', i) last_process.wait() if __name__ == "__main__": parser = argparse.ArgumentParser( description='Preprocessing Application for Everything BERT-related') parser.add_argument( '--action', type=str, help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords', choices={ 'download', # Download and verify mdf5/sha sums 'text_formatting', # Convert into a file that contains one article/book per line 'sharding', # Convert previous formatted text into shards containing one sentence per line 'create_tfrecord_files', # Turn each shard into a TFrecord with masking and next sentence prediction info 'create_hdf5_files' # Turn each shard into a HDF5 file with masking and next sentence prediction info }) parser.add_argument( '--dataset', type=str, help='Specify the dataset to perform --action on', choices={ 'bookscorpus', 'wikicorpus_en', 'wikicorpus_zh', 'books_wiki_en_corpus', 'squad', 'all' }) parser.add_argument( '--input_files', type=str, help='Specify the input files in a comma-separated list (no spaces)') parser.add_argument( '--n_training_shards', type=int, help='Specify the number of training shards to generate', default=256) parser.add_argument( '--n_test_shards', type=int, help='Specify the number of test shards to generate', default=256) parser.add_argument( '--fraction_test_set', type=float, help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)', default=0.1) parser.add_argument( '--segmentation_method', type=str, help='Specify your choice of sentence segmentation', choices={'nltk'}, default='nltk') parser.add_argument( '--n_processes', type=int, help='Specify the max number of processes to allow at one time', default=4) parser.add_argument( '--random_seed', type=int, help='Specify the base seed to use for any random number generation', default=12345) parser.add_argument( '--dupe_factor', type=int, help='Specify the duplication factor', default=5) parser.add_argument( '--masked_lm_prob', type=float, help='Specify the probability for masked lm', default=0.15) parser.add_argument( '--max_seq_length', type=int, help='Specify the maximum sequence length', default=512) parser.add_argument( '--max_predictions_per_seq', type=int, help='Specify the maximum number of masked words per sequence', default=20) parser.add_argument( '--do_lower_case', type=int, help='Specify whether it is cased (0) or 
uncased (1) (any number greater than 0 will be treated as uncased)', default=1) parser.add_argument( '--vocab_file', type=str, help='Specify absolute path to vocab file to use)') parser.add_argument( '--skip_wikiextractor', type=int, help='Specify whether to skip wikiextractor step 0=False, 1=True', default=0) parser.add_argument( '--interactive_json_config_generator', type=str, help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords' ) main(parser.parse_args())
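The actions above form a pipeline (download, text_formatting, sharding, then create_hdf5_files). A hedged sketch of driving it end to end for the English Wikipedia corpus; the working directory and vocab path are placeholders for your environment:

# Hypothetical end-to-end run of bertPrep.py for wikicorpus_en.
import os
import subprocess

os.environ["BERT_PREP_WORKING_DIR"] = "/workspace/bert_data"  # placeholder working directory
for action in ["download", "text_formatting", "sharding"]:
    subprocess.run(["python", "bertPrep.py", "--action", action,
                    "--dataset", "wikicorpus_en"], check=True)
subprocess.run(["python", "bertPrep.py", "--action", "create_hdf5_files",
                "--dataset", "wikicorpus_en",
                "--vocab_file", "/workspace/vocab/vocab.txt",  # placeholder vocab file
                "--max_seq_length", "512", "--do_lower_case", "1"], check=True)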
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner
triton_performance_runner
__init__
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .runner import TritonPerformanceRunner # noqa: F401
TensorFlow2/Segmentation/MaskRCNN/dataset
dataset
download_and_preprocess_coco
#!/bin/bash # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Script to download and preprocess the COCO data set for detection. # # The outputs of this script are TFRecord files containing serialized # tf.Example protocol buffers. See create_coco_tf_record.py for details of how # the tf.Example protocol buffers are constructed and see # http://cocodataset.org/#overview for an overview of the dataset. # # usage: # bash download_and_preprocess_coco.sh /data-dir/coco set -e set -x if [ -z "$1" ]; then echo "usage download_and_preprocess_coco.sh [data dir]" exit fi #sudo apt install -y protobuf-compiler python-pil python-lxml\ # python-pip python-dev git unzip #pip install Cython git+https://github.com/cocodataset/cocoapi#subdirectory=PythonAPI echo "Cloning Tensorflow models directory (for conversion utilities)" if [ ! -e tf-models ]; then git clone http://github.com/tensorflow/models tf-models fi (cd tf-models/research && protoc object_detection/protos/*.proto --python_out=.) UNZIP="unzip -nq" # Create the output directories. OUTPUT_DIR="${1%/}" SCRATCH_DIR="${OUTPUT_DIR}/raw-data" mkdir -p "${OUTPUT_DIR}" mkdir -p "${SCRATCH_DIR}" CURRENT_DIR=$(pwd) # Helper function to download and unpack a .zip file. function download_and_unzip() { local BASE_URL=${1} local FILENAME=${2} if [ ! -f ${FILENAME} ]; then echo "Downloading ${FILENAME} to $(pwd)" wget -nd -c "${BASE_URL}/${FILENAME}" else echo "Skipping download of ${FILENAME}" fi echo "Unzipping ${FILENAME}" ${UNZIP} ${FILENAME} } cd ${SCRATCH_DIR} # Download the images. BASE_IMAGE_URL="http://images.cocodataset.org/zips" TRAIN_IMAGE_FILE="train2017.zip" download_and_unzip ${BASE_IMAGE_URL} ${TRAIN_IMAGE_FILE} TRAIN_IMAGE_DIR="${SCRATCH_DIR}/train2017" VAL_IMAGE_FILE="val2017.zip" download_and_unzip ${BASE_IMAGE_URL} ${VAL_IMAGE_FILE} VAL_IMAGE_DIR="${SCRATCH_DIR}/val2017" TEST_IMAGE_FILE="test2017.zip" download_and_unzip ${BASE_IMAGE_URL} ${TEST_IMAGE_FILE} TEST_IMAGE_DIR="${SCRATCH_DIR}/test2017" # Download the annotations. BASE_INSTANCES_URL="http://images.cocodataset.org/annotations" INSTANCES_FILE="annotations_trainval2017.zip" download_and_unzip ${BASE_INSTANCES_URL} ${INSTANCES_FILE} TRAIN_OBJ_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_train2017.json" VAL_OBJ_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/instances_val2017.json" TRAIN_CAPTION_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/captions_train2017.json" VAL_CAPTION_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/captions_val2017.json" # Download the test image info. BASE_IMAGE_INFO_URL="http://images.cocodataset.org/annotations" IMAGE_INFO_FILE="image_info_test2017.zip" download_and_unzip ${BASE_IMAGE_INFO_URL} ${IMAGE_INFO_FILE} TESTDEV_ANNOTATIONS_FILE="${SCRATCH_DIR}/annotations/image_info_test-dev2017.json" # # Build TFRecords of the image data. 
cd "${CURRENT_DIR}" # Setup packages touch tf-models/__init__.py touch tf-models/research/__init__.py # Run our conversion SCRIPT_DIR=$(dirname "$(readlink -f "$0")") PYTHONPATH="tf-models:tf-models/research" python $SCRIPT_DIR/create_coco_tf_record.py \ --logtostderr \ --include_masks \ --train_image_dir="${TRAIN_IMAGE_DIR}" \ --val_image_dir="${VAL_IMAGE_DIR}" \ --test_image_dir="${TEST_IMAGE_DIR}" \ --train_object_annotations_file="${TRAIN_OBJ_ANNOTATIONS_FILE}" \ --val_object_annotations_file="${VAL_OBJ_ANNOTATIONS_FILE}" \ --train_caption_annotations_file="${TRAIN_CAPTION_ANNOTATIONS_FILE}" \ --val_caption_annotations_file="${VAL_CAPTION_ANNOTATIONS_FILE}" \ --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ --output_dir="${OUTPUT_DIR}" mv ${SCRATCH_DIR}/annotations/ ${OUTPUT_DIR}
PyTorch/Detection/Efficientdet/scripts/D0
D0
train_FP32_8xV100-32G
#!/bin/bash function get_dataloader_workers { gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader) core=$(nproc --all) workers=$((core/gpus-2)) workers=$((workers>16?16:workers)) echo ${workers} } WORKERS=$(get_dataloader_workers) ./distributed_train.sh 8 /workspace/object_detection/datasets/coco --model efficientdet_d0 -b 30 --lr 0.375 --amp --opt fusedmomentum --warmup-epochs 20 --lr-noise 0.4 0.9 --output /model --worker ${WORKERS} --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 200 --epochs 300 --resume --smoothing 0.0 --pretrained-backbone-path /backbone_checkpoints/jocbackbone_statedict_B0.pth --memory-format nchw --sync-bn --fused-focal-loss --seed 12711
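The get_dataloader_workers helper above caps dataloader workers at 16 and otherwise uses cores per GPU minus 2. The same arithmetic as a small Python sketch; the core and GPU counts in the example are illustrative:

# Mirrors the worker-count heuristic from the launch script above.
def dataloader_workers(total_cores: int, num_gpus: int, cap: int = 16) -> int:
    workers = total_cores // num_gpus - 2
    return min(workers, cap)

# e.g. a 96-core host with 8 GPUs -> min(96 // 8 - 2, 16) = 10 workers per GPU
print(dataloader_workers(96, 8))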
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers
layers
position_embedding
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based positional embedding layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import tf_utils # @tf.keras.utils.register_keras_serializable(package="Text") class PositionEmbedding(tf.keras.layers.Layer): """Creates a positional embedding. This layer creates a positional embedding as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). This layer can be set up to either create a statically shaped slice or a dynamically shaped slice. If `use_dynamic_slicing` is True, the input tensor can have a dynamic 1st dimension, while if `use_dynamic_slicing` is False the input size must be fixed. Attributes: use_dynamic_slicing: Whether to use the dynamic slicing path. max_sequence_length: The maximum size of the dynamic sequence. Only applicable if `use_dynamic_slicing` is True. initializer: The initializer to use for the embedding weights. Defaults to "glorot_uniform". """ def __init__(self, initializer="glorot_uniform", use_dynamic_slicing=False, max_sequence_length=None, **kwargs): # We need to have a default dtype of float32, since the inputs (which Keras # usually uses to infer the dtype) will always be int32. if "dtype" not in kwargs: kwargs["dtype"] = "float32" super(PositionEmbedding, self).__init__(**kwargs) if use_dynamic_slicing and max_sequence_length is None: raise ValueError( "If `use_dynamic_slicing` is True, `max_sequence_length` must be set." ) self._max_sequence_length = max_sequence_length self._initializer = tf.keras.initializers.get(initializer) self._use_dynamic_slicing = use_dynamic_slicing def get_config(self): config = { "max_sequence_length": self._max_sequence_length, "initializer": tf.keras.initializers.serialize(self._initializer), "use_dynamic_slicing": self._use_dynamic_slicing, } base_config = super(PositionEmbedding, self).get_config() return dict(list(base_config.items()) + list(config.items())) def build(self, input_shape): """Implements build() for the layer.""" dimension_list = input_shape.as_list() if len(dimension_list) != 3: raise ValueError("PositionEmbedding expects a 3-dimensional input tensor " "of shape [batch, sequence, width]") seq_length = dimension_list[1] width = dimension_list[2] # If we are not using dynamic slicing, we must assume that the sequence # length is fixed and max_sequence_length should not be specified. 
if not self._use_dynamic_slicing: if seq_length is None: raise ValueError( "PositionEmbedding must have `use_dynamic_slicing` set " "to True (and max_sequence_length set) when the " "sequence (1st) dimension of the input is None.") if self._max_sequence_length is not None: raise ValueError( "When `use_dynamic_slicing` is False, max_sequence_length should " "not be specified and we ought to use seq_length to get the " "variable shape.") if self._max_sequence_length is not None: weight_sequence_length = self._max_sequence_length else: weight_sequence_length = seq_length self._position_embeddings = self.add_weight( "embeddings", shape=[weight_sequence_length, width], initializer=self._initializer) super(PositionEmbedding, self).build(input_shape) def call(self, inputs): """Implements call() for the layer.""" if self._use_dynamic_slicing: input_shape = tf_utils.get_shape_list(inputs, expected_rank=3) seq_length = input_shape[1] width = input_shape[2] position_embeddings = tf.expand_dims( tf.slice(self._position_embeddings, [0, 0], [seq_length, width]), axis=0) else: position_embeddings = tf.expand_dims(self._position_embeddings, axis=0) return position_embeddings
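A minimal usage sketch of the layer above with dynamic slicing; the import path is assumed to mirror this repository layout and the shapes are examples only:

# Hypothetical usage of PositionEmbedding; batch/sequence/width sizes are illustrative.
import tensorflow as tf
from official.nlp.modeling.layers import position_embedding  # assumed module path

word_embeddings = tf.keras.Input(shape=(128, 768))  # [batch, seq_length, width]
position_layer = position_embedding.PositionEmbedding(
    use_dynamic_slicing=True, max_sequence_length=512)
position_embeddings = position_layer(word_embeddings)  # [1, 128, 768], broadcast over batch
outputs = word_embeddings + position_embeddings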
PaddlePaddle/LanguageModeling/BERT
BERT
tokenizer
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import collections import os import unicodedata from io import open def convert_to_unicode(text): """ Converts `text` to Unicode (if it's not already), assuming utf-8 input. Args: text(str|bytes): Text to be converted to unicode. Returns: str: converted text. """ if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError(f"Unsupported string type: {type(text)}") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """ Runs basic whitespace cleaning and splitting on a peice of text. Args: text(str): Text to be tokened. Returns: tokens(list): Token list. """ text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer: """Runs end-to-end tokenization: punctuation splitting + wordpiece""" pad_token = "[PAD]" def __init__(self, vocab_file, do_lower_case=True, max_len=512, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path {vocab_file}") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): """Tokenize a piece of text.""" split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: raise ValueError( f"Token indices sequence length is longer than the specified maximum " f"sequence length for this BERT model ({len(ids)} > {self.max_len}). " f"Running this sequence through BERT will result in indexing errors" ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens class BasicTokenizer: """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """ Constructs a BasicTokenizer. Args: do_lower_case(bool, optional): Whether to lower case the input. 
""" self.do_lower_case = do_lower_case self.never_split = never_split def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in self.never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" if text in self.never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer: """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. 
if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
PyTorch/LanguageModeling/BERT/triton/large/runner
runner
__main__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import pathlib from typing import List if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.config import Config from ...runner.executor import Executor from ...runner.finalizer import ExperimentFinalizer from ...runner.maintainer import DockerMaintainer from ...runner.preparer import ExperimentPreparer from ...runner.runner_proxy import RunnerProxy from .pipeline_impl import pipeline class ExperimentRunner(RunnerProxy): """ Experiment Runner proxy for runner wrapper """ maintainer_cls = DockerMaintainer executor_cls = Executor preparer_cls = ExperimentPreparer finalizer_cls = ExperimentFinalizer def execute(config_path: str, devices: List[str]): if len(devices) == 0: devices = ["0"] config = Config.from_file(config_path) runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices) runner.start() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.") parser.add_argument( "--devices", type=str, nargs="*", required=False, help="List of device ids on which the experiment should be run." ) args = parser.parse_args() config_path = args.config_path devices = args.devices execute(config_path, devices)
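A hedged launch sketch for the experiment runner above; the module path is an assumption based on this entry's location, and the config file and device ids are placeholders:

# Hypothetical invocation of the runner package's __main__; run from the BERT model root.
import subprocess

subprocess.run([
    "python", "-m", "triton.large.runner",     # assumed module path for this __main__
    "--config-path", "runner_config.yaml",     # placeholder experiment configuration
    "--devices", "0", "1",
], check=True)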
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils
utils
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa from .utils import * from .io_utils import *
PyTorch/Segmentation/MaskRCNN/pytorch/tools/cityscapes
cityscapes
convert_cityscapes_to_coco
#!/usr/bin/env python # Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## # This file is copy from https://github.com/facebookresearch/Detectron/tree/master/tools from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import h5py import json import os import scipy.misc import sys import cityscapesscripts.evaluation.instances2dict_with_polygons as cs import detectron.utils.segms as segms_util import detectron.utils.boxes as bboxs_util def parse_args(): parser = argparse.ArgumentParser(description='Convert dataset') parser.add_argument( '--dataset', help="cocostuff, cityscapes", default=None, type=str) parser.add_argument( '--outdir', help="output dir for json files", default=None, type=str) parser.add_argument( '--datadir', help="data dir for annotations to be converted", default=None, type=str) if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args() def convert_coco_stuff_mat(data_dir, out_dir): """Convert to png and save json with path. This currently only contains the segmentation labels for objects+stuff in cocostuff - if we need to combine with other labels from original COCO that will be a TODO.""" sets = ['train', 'val'] categories = [] json_name = 'coco_stuff_%s.json' ann_dict = {} for data_set in sets: file_list = os.path.join(data_dir, '%s.txt') images = [] with open(file_list % data_set) as f: for img_id, img_name in enumerate(f): img_name = img_name.replace('coco', 'COCO').strip('\n') image = {} mat_file = os.path.join( data_dir, 'annotations/%s.mat' % img_name) data = h5py.File(mat_file, 'r') labelMap = data.get('S') if len(categories) == 0: labelNames = data.get('names') for idx, n in enumerate(labelNames): categories.append( {"id": idx, "name": ''.join(chr(i) for i in data[ n[0]])}) ann_dict['categories'] = categories scipy.misc.imsave( os.path.join(data_dir, img_name + '.png'), labelMap) image['width'] = labelMap.shape[0] image['height'] = labelMap.shape[1] image['file_name'] = img_name image['seg_file_name'] = img_name image['id'] = img_id images.append(image) ann_dict['images'] = images print("Num images: %s" % len(images)) with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile: outfile.write(json.dumps(ann_dict)) # for Cityscapes def getLabelID(self, instID): if (instID < 1000): return instID else: return int(instID / 1000) def convert_cityscapes_instance_only( data_dir, out_dir): """Convert from cityscapes format to COCO instance seg format - polygons""" sets = [ 'gtFine_val', 'gtFine_train', 'gtFine_test', # 'gtCoarse_train', # 'gtCoarse_val', # 'gtCoarse_train_extra' ] ann_dirs = [ 'gtFine_trainvaltest/gtFine/val', 'gtFine_trainvaltest/gtFine/train', 'gtFine_trainvaltest/gtFine/test', # 'gtCoarse/train', # 'gtCoarse/train_extra', # 'gtCoarse/val' ] json_name = 'instancesonly_filtered_%s.json' ends_in = 
'%s_polygons.json' img_id = 0 ann_id = 0 cat_id = 1 category_dict = {} category_instancesonly = [ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', ] for data_set, ann_dir in zip(sets, ann_dirs): print('Starting %s' % data_set) ann_dict = {} images = [] annotations = [] ann_dir = os.path.join(data_dir, ann_dir) for root, _, files in os.walk(ann_dir): for filename in files: if filename.endswith(ends_in % data_set.split('_')[0]): if len(images) % 50 == 0: print("Processed %s images, %s annotations" % ( len(images), len(annotations))) json_ann = json.load(open(os.path.join(root, filename))) image = {} image['id'] = img_id img_id += 1 image['width'] = json_ann['imgWidth'] image['height'] = json_ann['imgHeight'] image['file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + 'leftImg8bit.png' image['seg_file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + \ '%s_instanceIds.png' % data_set.split('_')[0] images.append(image) fullname = os.path.join(root, image['seg_file_name']) objects = cs.instances2dict_with_polygons( [fullname], verbose=False)[fullname] for object_cls in objects: if object_cls not in category_instancesonly: continue # skip non-instance categories for obj in objects[object_cls]: if obj['contours'] == []: print('Warning: empty contours.') continue # skip non-instance categories len_p = [len(p) for p in obj['contours']] if min(len_p) <= 4: print('Warning: invalid contours.') continue # skip non-instance categories ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = obj['contours'] if object_cls not in category_dict: category_dict[object_cls] = cat_id cat_id += 1 ann['category_id'] = category_dict[object_cls] ann['iscrowd'] = 0 ann['area'] = obj['pixelCount'] ann['bbox'] = bboxs_util.xyxy_to_xywh( segms_util.polys_to_boxes( [ann['segmentation']])).tolist()[0] annotations.append(ann) ann_dict['images'] = images categories = [{"id": category_dict[name], "name": name} for name in category_dict] ann_dict['categories'] = categories ann_dict['annotations'] = annotations print("Num categories: %s" % len(categories)) print("Num images: %s" % len(images)) print("Num annotations: %s" % len(annotations)) with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile: outfile.write(json.dumps(ann_dict)) if __name__ == '__main__': args = parse_args() if args.dataset == "cityscapes_instance_only": convert_cityscapes_instance_only(args.datadir, args.outdir) elif args.dataset == "cocostuff": convert_coco_stuff_mat(args.datadir, args.outdir) else: print("Dataset not supported: %s" % args.dataset)
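An invocation sketch for the converter above; the directories are placeholders, while the flag names and dataset value come from parse_args and the main block:

# Hypothetical conversion of Cityscapes gtFine annotations to COCO-style instance JSON files.
import subprocess

subprocess.run([
    "python", "tools/cityscapes/convert_cityscapes_to_coco.py",  # assumed script location
    "--dataset", "cityscapes_instance_only",
    "--datadir", "/data/cityscapes",      # should contain gtFine_trainvaltest/gtFine/{train,val,test}
    "--outdir", "/data/cityscapes/annotations",
], check=True)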
PyTorch/LanguageModeling/BERT/distillation
distillation
losses
# coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch.nn import MSELoss, KLDivLoss, CosineEmbeddingLoss import math class TransformerLosses(): """Implements transformer specific loss functions for Knowledge Distillation. """ def __init__(self, student_config, teacher_config, device, args): self.mse_loss = MSELoss() self.kl_loss = KLDivLoss(reduction='batchmean') self.cosine_loss = CosineEmbeddingLoss() self.distill_config = student_config.distillation_config self.device = device self.student_config = student_config self.teacher_config = teacher_config self.batch_size = args.train_batch_size def compute_loss_(self, pred, target, loss_name): if self.distill_config[loss_name] == "mse": return self.mse_loss(pred, target) elif self.distill_config[loss_name] == "kld": seq_length = pred.size(0) if loss_name == "value_state_loss" else pred.size(-1) if loss_name == "value_state_loss": dk_student = pred.shape[-1] // self.student_config.num_attention_heads dk_teacher = target.shape[-1] // self.teacher_config.num_attention_heads # Old: (bsz, seq, heads * dk) => (bsz, heads, seq, dk) # New: (seq, bsz, heads * dk) => (bsz * heads, seq, dk) student_values = pred.view(seq_length, self.batch_size * self.student_config.num_attention_heads, dk_student) student_values = student_values.transpose(0, 1) teacher_values = target.view(seq_length, self.batch_size * self.teacher_config.num_attention_heads, dk_teacher) teacher_values = teacher_values.transpose(0, 1) # (..., seq, dk) x (..., dk, seq) pred = torch.bmm(student_values, student_values.transpose(1, 2)) / math.sqrt(dk_student) target = torch.bmm(teacher_values, teacher_values.transpose(1, 2)) / math.sqrt(dk_teacher) pred = pred.view(self.batch_size, self.student_config.num_attention_heads, seq_length, seq_length) target = target.view(self.batch_size, self.teacher_config.num_attention_heads, seq_length, seq_length) return self.kl_loss(torch.nn.LogSoftmax(dim=-1)(pred), torch.nn.Softmax(dim=-1)(target)) / ( self.student_config.num_attention_heads * seq_length) elif self.distill_config[loss_name] == "cosine": # seq_length = pred.size(0) # return self.cosine_loss(pred.transpose(0, 2).reshape(-1, seq_length), # target.transpose(0, 2).reshape(-1, seq_length), # torch.tensor([1]).to(self.device)) return self.cosine_loss(pred.view(-1, self.teacher_config.hidden_size), target.view(-1, self.teacher_config.hidden_size), torch.tensor([1]).to(self.device)) else: error_string = "'attention_loss':{} not defined. Choose among 'mse', 'cosine' or 'kld'".format( self.distill_config["attention_loss"]) raise ValueError(error_string) def compute_loss(self, pred, target, loss_name): loss = 0. 
for student, teacher in zip(pred, target): if loss_name == "attention_loss": student = torch.where(student <= -1e2, torch.zeros_like(student).to(self.device), student) teacher = torch.where(teacher <= -1e2, torch.zeros_like(teacher).to(self.device), teacher) tmp_loss = self.compute_loss_(student, teacher, loss_name) loss += tmp_loss return loss
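A minimal usage sketch for the TransformerLosses class above (not part of the original file). The config objects, the "hidden_state_loss" key, and the import path are illustrative assumptions; only the class signature comes from the file itself.

# Hedged sketch: distilling per-layer hidden states with the "mse" option.
import torch
from types import SimpleNamespace
from losses import TransformerLosses  # assumed import path within the distillation package

student_cfg = SimpleNamespace(distillation_config={"hidden_state_loss": "mse"},
                              num_attention_heads=4, hidden_size=256)
teacher_cfg = SimpleNamespace(distillation_config={"hidden_state_loss": "mse"},
                              num_attention_heads=16, hidden_size=1024)
args = SimpleNamespace(train_batch_size=8)

losses = TransformerLosses(student_cfg, teacher_cfg, device=torch.device("cpu"), args=args)

# One (seq_len, batch, hidden) tensor per distilled layer; teacher states are
# assumed to be already projected to the student hidden width for the MSE path.
student_states = [torch.randn(128, 8, 256) for _ in range(4)]
teacher_states = [torch.randn(128, 8, 256) for _ in range(4)]
loss = losses.compute_loss(student_states, teacher_states, "hidden_state_loss")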
TensorFlow2/Recommendation/WideAndDeep
WideAndDeep
gen_embedding_sizes
from data.feature_spec import FeatureSpec
from data.outbrain.defaults import ONEHOT_CHANNEL, MULTIHOT_CHANNEL
from argparse import ArgumentParser
import random
import json


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
                        help='Name of the input feature specification file')
    parser.add_argument('--output', type=str)
    parser.add_argument('--max_size', type=int, default=256,
                        help='Max embedding size to pick')
    return parser.parse_args()


def main():
    # this generator supports the following feature types:
    # onehot categorical
    # numerical
    # label
    # multihot categorical
    args = parse_args()
    fspec_in = FeatureSpec.from_yaml(args.feature_spec_in)
    max_size = args.max_size
    onehot_features = fspec_in.get_names_by_channel(ONEHOT_CHANNEL)
    multihot_features = fspec_in.get_names_by_channel(MULTIHOT_CHANNEL)
    sizes = {feature: random.randint(1, max_size) for feature in onehot_features + multihot_features}
    with open(args.output, "w") as opened:
        json.dump(sizes, opened)


if __name__ == "__main__":
    main()
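A standalone sketch of what this generator produces, without the FeatureSpec dependency; the feature names below are hypothetical stand-ins for the onehot and multihot names taken from the feature spec.

import json
import random

categorical_features = ["ad_id", "document_id", "platform"]   # hypothetical names
max_size = 256
sizes = {name: random.randint(1, max_size) for name in categorical_features}
with open("embedding_sizes.json", "w") as f:
    json.dump(sizes, f)   # e.g. {"ad_id": 173, "document_id": 41, "platform": 7}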
TensorFlow/Detection/SSD/models/research/object_detection/predictors
predictors
convolutional_keras_box_predictor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolutional Box Predictors with and without weight sharing.""" import collections import tensorflow as tf from object_detection.core import box_predictor from object_detection.utils import static_shape keras = tf.keras.layers BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Keras Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_heads, class_prediction_heads, other_heads, conv_hyperparams, num_layers_before_predictor, min_depth, max_depth, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_heads: A list of heads that predict the boxes. class_prediction_heads: A list of heads that predict the classes. other_heads: A dictionary mapping head names to lists of convolutional heads. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. 
Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalBoxPredictor, self).__init__( is_training, num_classes, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) if min_depth > max_depth: raise ValueError('min_depth should be less than or equal to max_depth') if len(box_prediction_heads) != len(class_prediction_heads): raise ValueError('All lists of heads must be the same length.') for other_head_list in other_heads.values(): if len(box_prediction_heads) != len(other_head_list): raise ValueError('All lists of heads must be the same length.') self._prediction_heads = { BOX_ENCODINGS: box_prediction_heads, CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads, } if other_heads: self._prediction_heads.update(other_heads) # We generate a consistent ordering for the prediction head names, # So that all workers build the model in the exact same order self._sorted_head_names = sorted(self._prediction_heads.keys()) self._conv_hyperparams = conv_hyperparams self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor self._shared_nets = [] def build(self, input_shapes): """Creates the variables of the layer.""" if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]): raise ValueError('This box predictor was constructed with %d heads,' 'but there are %d inputs.' % (len(self._prediction_heads[BOX_ENCODINGS]), len(input_shapes))) for stack_index, input_shape in enumerate(input_shapes): net = [] # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(input_shape) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info( 'depth of additional conv before box predictor: {}'.format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net.append(keras.Conv2D(depth, [1, 1], name='SharedConvolutions_%d/Conv2d_%d_1x1_%d' % (stack_index, i, depth), padding='SAME', **self._conv_hyperparams.params())) net.append(self._conv_hyperparams.build_batch_norm( training=(self._is_training and not self._freeze_batchnorm), name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm' % (stack_index, i, depth))) net.append(self._conv_hyperparams.build_activation_layer( name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation' % (stack_index, i, depth), )) # Until certain bugs are fixed in checkpointable lists, # this net must be appended only once it's been filled with layers self._shared_nets.append(net) self.built = True def _predict(self, image_features): """Computes encoded object locations and corresponding confidences. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = collections.defaultdict(list) for (index, net) in enumerate(image_features): # Apply shared conv layers before the head predictors. 
for layer in self._shared_nets[index]: net = layer(net) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name][index] prediction = head_obj(net) predictions[head_name].append(prediction) return predictions
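A simplified sketch of the pattern the predictor above implements: a small tower of shared convolutions followed by parallel box and class heads per feature-map level. This is not the object_detection API; it only illustrates the data flow with plain Keras layers, and the anchor/class counts are arbitrary.

import tensorflow as tf

num_anchors, num_classes, box_code_size = 6, 90, 4
shared_tower = [tf.keras.layers.Conv2D(256, 1, padding="same", activation="relu")]
box_head = tf.keras.layers.Conv2D(num_anchors * box_code_size, 3, padding="same")
cls_head = tf.keras.layers.Conv2D(num_anchors * (num_classes + 1), 3, padding="same")

feature_map = tf.zeros([1, 19, 19, 512])      # one level of the feature pyramid
net = feature_map
for layer in shared_tower:                    # shared 1x1 conv(s), as in build()
    net = layer(net)
box_encodings = box_head(net)                 # [1, 19, 19, num_anchors * 4]
class_predictions = cls_head(net)             # [1, 19, 19, num_anchors * (num_classes + 1)]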
TensorFlow2/Recommendation/SIM/scripts
scripts
run_model
#!/bin/bash

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

usage() {
  cat <<EOF
Usage: bash scripts/run_model.sh
  --data_path              Data path. Default: /data.
  --gpus                   Number of gpus.
  --amp                    Use amp (0 or 1).
  --xla                    Use xla (0 or 1).
  --benchmark              Use benchmark mode (0 or 1).
  --benchmark_steps        Number of bench steps.
  --mode                   One of: train, inference.
  --epochs                 Number of epochs (only valid with mode=train).
  --batch_size             Batch size.
  --results_dir            Path to output directory. Default: /tmp/sim.
  --log_filename           Name of output log file within results_dir. Default: log.json.
  --save_checkpoint_path   Path to output checkpoint after training.
  --load_checkpoint_path   Path from which to restore checkpoint for inference or suspend/resume training.
  --prebatch_train_size
  --prebatch_test_size
EOF
}

if [ ! -d "scripts" ] || [ ! "$(ls -A 'scripts')" ]; then
  echo "[ERROR] You are probably calling this script from wrong directory"
  usage
  exit 1
fi

gpus=${gpus:-1}
data_path=${data_path:-/data}
xla=${xla:-0}
amp=${amp:-0}
benchmark=${benchmark:-0}

while [ $# -gt 0 ]; do
  if [[ $1 == *"--"* ]]; then
    param="${1/--/}"
    declare $param="$2"
  fi
  shift
done

xla_map_arg=("" "--xla")
amp_map_arg=("" "--amp")
benchmark_map_arg=("" "--benchmark")

xla_arg=${xla_map_arg[$xla]}
amp_arg=${amp_map_arg[$amp]}
benchmark_arg=${benchmark_map_arg[$benchmark]}

function get_option_or_use_default() {
  if [ -z $2 ]
  then
    echo ""
  else
    echo $1 $2
  fi
}

data_path_option=$(get_option_or_use_default --dataset_dir $data_path)
mode_option=$(get_option_or_use_default --mode $mode)
benchmark_steps_option=$(get_option_or_use_default --benchmark_steps $benchmark_steps)
batch_size_option=$(get_option_or_use_default --global_batch_size $batch_size)
epochs_option=$(get_option_or_use_default --epochs $epochs)
results_dir_option=$(get_option_or_use_default --results_dir $results_dir)
log_filename_option=$(get_option_or_use_default --log_filename $log_filename)
save_checkpoint_path_option=$(get_option_or_use_default --save_checkpoint_path $save_checkpoint_path)
load_checkpoint_path_option=$(get_option_or_use_default --load_checkpoint_path $load_checkpoint_path)
prebatch_train_size_option=$(get_option_or_use_default --prebatch_train_size $prebatch_train_size)
prebatch_test_size_option=$(get_option_or_use_default --prebatch_test_size $prebatch_test_size)

command="mpiexec --allow-run-as-root --bind-to socket -np ${gpus} python main.py --dataset_dir ${data_path} --drop_remainder ${epochs_option} ${xla_arg} ${amp_arg} ${benchmark_arg} ${mode_option} ${benchmark_steps_option} ${batch_size_option} ${results_dir_option} ${log_filename_option} ${save_checkpoint_path_option} ${load_checkpoint_path_option} ${prebatch_train_size_option} ${prebatch_test_size_option}"

printf "[INFO] Running:\n%s\n" "${command}"

# run
$command
TensorFlow/Segmentation/UNet_Medical/model
model
unet
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Model construction utils

This module provides a convenient way to create different topologies
based around UNet.

"""

import tensorflow as tf

from model.layers import output_block, upsample_block, bottleneck, downsample_block, input_block


def unet_v1(features, mode):
    """ U-Net: Convolutional Networks for Biomedical Image Segmentation

    Source:
        https://arxiv.org/pdf/1505.04597

    """
    skip_connections = []

    out, skip = input_block(features, filters=64)
    skip_connections.append(skip)

    for idx, filters in enumerate([128, 256, 512]):
        out, skip = downsample_block(out, filters=filters, idx=idx)
        skip_connections.append(skip)

    out = bottleneck(out, filters=1024, mode=mode)

    for idx, filters in enumerate([512, 256, 128]):
        out = upsample_block(out,
                             residual_input=skip_connections.pop(),
                             filters=filters,
                             idx=idx)

    return output_block(out, residual_input=skip_connections.pop(), filters=64, n_classes=2)
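A hedged sketch of building the graph with unet_v1. The 572x572 input size follows the original U-Net paper; the exact shape this repository expects depends on model.layers, and the TF1-style placeholder is an assumption consistent with the estimator-based code around it.

import tensorflow as tf
from model.unet import unet_v1   # import path follows the file location shown above

features = tf.placeholder(tf.float32, shape=[None, 572, 572, 1])   # assumed input size
logits = unet_v1(features, mode=tf.estimator.ModeKeys.TRAIN)        # 2-class output block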
PyTorch/Translation/GNMT/seq2seq
seq2seq
utils
# Copyright (c) 2017 Elad Hoffer # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging.config import os import random import sys import time from contextlib import contextmanager import dllogger import numpy as np import torch import torch.distributed as dist import torch.nn.init as init import torch.utils.collect_env def init_lstm_(lstm, init_weight=0.1): """ Initializes weights of LSTM layer. Weights and biases are initialized with uniform(-init_weight, init_weight) distribution. :param lstm: instance of torch.nn.LSTM :param init_weight: range for the uniform initializer """ # Initialize hidden-hidden weights init.uniform_(lstm.weight_hh_l0.data, -init_weight, init_weight) # Initialize input-hidden weights: init.uniform_(lstm.weight_ih_l0.data, -init_weight, init_weight) # Initialize bias. PyTorch LSTM has two biases, one for input-hidden GEMM # and the other for hidden-hidden GEMM. Here input-hidden bias is # initialized with uniform distribution and hidden-hidden bias is # initialized with zeros. init.uniform_(lstm.bias_ih_l0.data, -init_weight, init_weight) init.zeros_(lstm.bias_hh_l0.data) if lstm.bidirectional: init.uniform_(lstm.weight_hh_l0_reverse.data, -init_weight, init_weight) init.uniform_(lstm.weight_ih_l0_reverse.data, -init_weight, init_weight) init.uniform_(lstm.bias_ih_l0_reverse.data, -init_weight, init_weight) init.zeros_(lstm.bias_hh_l0_reverse.data) def generate_seeds(rng, size): """ Generate list of random seeds :param rng: random number generator :param size: length of the returned list """ seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)] return seeds def broadcast_seeds(seeds, device): """ Broadcasts random seeds to all distributed workers. Returns list of random seeds (broadcasted from workers with rank 0). :param seeds: list of seeds (integers) :param device: torch.device """ if torch.distributed.is_available() and torch.distributed.is_initialized(): seeds_tensor = torch.tensor(seeds, dtype=torch.int64, device=device) torch.distributed.broadcast(seeds_tensor, 0) seeds = seeds_tensor.tolist() return seeds def setup_seeds(master_seed, epochs, device): """ Generates seeds from one master_seed. Function returns (worker_seeds, shuffling_seeds), worker_seeds are later used to initialize per-worker random number generators (mostly for dropouts), shuffling_seeds are for RNGs resposible for reshuffling the dataset before each epoch. 
Seeds are generated on worker with rank 0 and broadcasted to all other workers. :param master_seed: master RNG seed used to initialize other generators :param epochs: number of epochs :param device: torch.device (used for distributed.broadcast) """ if master_seed is None: # random master seed, random.SystemRandom() uses /dev/urandom on Unix master_seed = random.SystemRandom().randint(0, 2**32 - 1) if get_rank() == 0: # master seed is reported only from rank=0 worker, it's to avoid # confusion, seeds from rank=0 are later broadcasted to other # workers logging.info(f'Using random master seed: {master_seed}') else: # master seed was specified from command line logging.info(f'Using master seed from command line: {master_seed}') # initialize seeding RNG seeding_rng = random.Random(master_seed) # generate worker seeds, one seed for every distributed worker worker_seeds = generate_seeds(seeding_rng, get_world_size()) # generate seeds for data shuffling, one seed for every epoch shuffling_seeds = generate_seeds(seeding_rng, epochs) # broadcast seeds from rank=0 to other workers worker_seeds = broadcast_seeds(worker_seeds, device) shuffling_seeds = broadcast_seeds(shuffling_seeds, device) return worker_seeds, shuffling_seeds def barrier(): """ Call torch.distributed.barrier() if distritubed is in use, else calls torch.cuda.synchronize() if CUDA is initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): torch.distributed.barrier() elif torch.cuda.is_available() and torch.cuda.is_initialized(): torch.cuda.synchronize() def get_rank(): """ Gets distributed rank or returns zero if distributed is not initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): rank = torch.distributed.get_rank() else: rank = 0 return rank def get_world_size(): """ Gets total number of distributed workers or returns one if distributed is not initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): world_size = torch.distributed.get_world_size() else: world_size = 1 return world_size @contextmanager def sync_workers(): """ Yields distributed rank and synchronizes all workers on exit. """ rank = get_rank() yield rank barrier() @contextmanager def timer(name, ndigits=2, sync_gpu=True): if sync_gpu: torch.cuda.synchronize() start = time.time() yield if sync_gpu: torch.cuda.synchronize() stop = time.time() elapsed = round(stop - start, ndigits) logging.info(f'TIMER {name} {elapsed}') def setup_logging(log_all_ranks=True, log_file=os.devnull): """ Configures logging. By default logs from all workers are printed to the console, entries are prefixed with "N: " where N is the rank of the worker. Logs printed to the console don't include timestaps. Full logs with timestamps are saved to the log_file file. 
""" class RankFilter(logging.Filter): def __init__(self, rank, log_all_ranks): self.rank = rank self.log_all_ranks = log_all_ranks def filter(self, record): record.rank = self.rank if self.log_all_ranks: return True else: return (self.rank == 0) rank = get_rank() rank_filter = RankFilter(rank, log_all_ranks) for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) handler.close() logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s" logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt="%Y-%m-%d %H:%M:%S", filename=log_file, filemode='w') console = logging.StreamHandler(sys.stdout) console.setLevel(logging.INFO) formatter = logging.Formatter('%(rank)s: %(message)s') console.setFormatter(formatter) logging.getLogger('').addHandler(console) logging.getLogger('').addFilter(rank_filter) def setup_dllogger(enabled=True, filename=os.devnull): rank = get_rank() if enabled and rank == 0: backends = [ dllogger.JSONStreamBackend( dllogger.Verbosity.VERBOSE, filename, ), ] dllogger.init(backends) else: dllogger.init([]) dllogger.metadata("test_bleu", {"unit": None}) dllogger.metadata("eval_90%_latency", {"unit": "ms"}) dllogger.metadata("eval_avg_latency", {"unit": "ms"}) dllogger.metadata("train_elapsed", {"unit": "s"}) dllogger.metadata("eval_throughput", {"unit": "tokens/s"}) dllogger.metadata("train_throughput", {"unit": "tokens/s"}) def set_device(cuda, local_rank): """ Sets device based on local_rank and returns instance of torch.device. :param cuda: if True: use cuda :param local_rank: local rank of the worker """ if cuda: torch.cuda.set_device(local_rank) device = torch.device('cuda') else: device = torch.device('cpu') return device def init_distributed(cuda): """ Initializes distributed backend. :param cuda: (bool) if True initializes nccl backend, if False initializes gloo backend """ world_size = int(os.environ.get('WORLD_SIZE', 1)) distributed = (world_size > 1) if distributed: backend = 'nccl' if cuda else 'gloo' dist.init_process_group(backend=backend, init_method='env://') assert dist.is_initialized() return distributed def log_env_info(): """ Prints information about execution environment. """ logging.info('Collecting environment information...') env_info = torch.utils.collect_env.get_pretty_env_info() logging.info(f'{env_info}') def pad_vocabulary(math): if math == 'tf32' or math == 'fp16' or math == 'manual_fp16': pad_vocab = 8 elif math == 'fp32': pad_vocab = 1 return pad_vocab def benchmark(test_acc, target_acc, test_perf, target_perf): def test(achieved, target, name): passed = True if target is not None and achieved is not None: logging.info(f'{name} achieved: {achieved:.2f} ' f'target: {target:.2f}') if achieved >= target: logging.info(f'{name} test passed') else: logging.info(f'{name} test failed') passed = False return passed passed = True passed &= test(test_acc, target_acc, 'Accuracy') passed &= test(test_perf, target_perf, 'Performance') return passed def debug_tensor(tensor, name): """ Simple utility which helps with debugging. Takes a tensor and outputs: min, max, avg, std, number of NaNs, number of INFs. 
:param tensor: torch tensor :param name: name of the tensor (only for logging) """ logging.info(name) tensor = tensor.detach().float().cpu().numpy() logging.info(f'MIN: {tensor.min()} MAX: {tensor.max()} ' f'AVG: {tensor.mean()} STD: {tensor.std()} ' f'NAN: {np.isnan(tensor).sum()} INF: {np.isinf(tensor).sum()}') class AverageMeter: """ Computes and stores the average and current value """ def __init__(self, warmup=0, keep=False): self.reset() self.warmup = warmup self.keep = keep def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.iters = 0 self.vals = [] def update(self, val, n=1): self.iters += 1 self.val = val if self.iters > self.warmup: self.sum += val * n self.count += n self.avg = self.sum / self.count if self.keep: self.vals.append(val) def reduce(self, op): """ Reduces average value over all workers. :param op: 'sum' or 'mean', reduction operator """ if op not in ('sum', 'mean'): raise NotImplementedError distributed = (get_world_size() > 1) if distributed: backend = dist.get_backend() cuda = (backend == dist.Backend.NCCL) if cuda: avg = torch.cuda.FloatTensor([self.avg]) _sum = torch.cuda.FloatTensor([self.sum]) else: avg = torch.FloatTensor([self.avg]) _sum = torch.FloatTensor([self.sum]) dist.all_reduce(avg) dist.all_reduce(_sum) self.avg = avg.item() self.sum = _sum.item() if op == 'mean': self.avg /= get_world_size() self.sum /= get_world_size()
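A small illustration of the AverageMeter defined above, with a two-iteration warmup; only post-warmup updates contribute to the running average. The import path follows this repository's layout.

from seq2seq.utils import AverageMeter

meter = AverageMeter(warmup=2)
for batch_time in [0.9, 0.8, 0.5, 0.5, 0.5]:
    meter.update(batch_time)
print(meter.val)   # 0.5, the most recent value
print(meter.avg)   # 0.5, the mean of the three post-warmup values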
PyTorch/Translation/Transformer/fairseq/optim
optim
adam
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # #------------------------------------------------------------------------- # # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import FairseqOptimizer, register_optimizer from apex.optimizers.fused_adam import FusedAdam @register_optimizer('adam') class FairseqAdam(FairseqOptimizer): def __init__(self, args, params): super().__init__(args, params) self._optimizer = FusedAdam(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" parser.add_argument('--adam-betas', default=(0.9, 0.999), nargs=2, type=float, metavar='B1 B2', help='betas for Adam optimizer') parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': self.args.adam_betas, 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay, }
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/graph_aligner
graph_aligner
xgboost_aligner
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import logging import os import warnings from collections import defaultdict from pathlib import PosixPath from typing import Dict, Union, Literal import cudf import cupy import numpy as np import pandas as pd import xgboost try: from cuml.preprocessing import LabelEncoder from pylibraft.random import rmat # rmat needs to be imported before cuml except ImportError: from sklearn.preprocessing import OrdinalEncoder as LabelEncoder from syngen.graph_aligner.base_graph_aligner import BaseGraphAligner from syngen.graph_aligner.utils import ( get_graph, get_preproc_dict, get_preproc_fn, merge_dfs, spread_ranks, merge_graph_vertex_feat, ) from syngen.graph_aligner.utils import get_features as default_features from syngen.utils.types import ColumnType, DataFrameType, MetaData from syngen.utils.utils import df_to_cudf, df_to_pandas # - suppress numba in debug mode numba_logger = logging.getLogger("numba") numba_logger.setLevel(logging.WARNING) warnings.filterwarnings('ignore') class XGBoostAligner(BaseGraphAligner): """Aligns two graphs via correlating structural graph features and tabular features using a xgboost predictor. Args: xgboost_params: `dict` key-value parameters to pass to `xgboost.train`. To use different parameters for each feature pass a `dict` of `dict` corresponding to each feature, with keys as the feature name and values as the xgboost_params. num_boost_round: `dict` or int number of boosting rounds for xgboost. The same `num_boost_round` is used for all features unless a `dict` with keys as feature name and values as `num_boost_round` is passed. 
batch_size: int the size of the chunk during the alignment process topk: int the number of candidates with the highest ranks to be chosen from during alignment """ def __init__( self, xgboost_params: Union[Dict[str, dict], dict] = { "learning_rate": 0.1, "colsample_bytree": 0.3, "max_depth": 5, "n_estimators": 100, "alpha": 10, "tree_method": "gpu_hist", }, num_boost_round: Union[Dict[str, int], int] = 10, batch_size: int = 100000, topk: int = 4, get_features=default_features, verbose=False, **kwargs, ): self.xgboost_params = xgboost_params self.num_boost_round = num_boost_round self.batch_size = batch_size self.topk = topk self.col_maps_edge = None self.col_maps_node = None self.get_features = get_features self.verbose = verbose self.xgboost_params['verbosity'] = int(xgboost_params.get('verbosity', self.verbose)) self.xgboost_params['silent'] = int(xgboost_params.get('silent', not self.verbose)) self.features_to_correlate_edge = None self.features_to_correlate_node = None self.col_maps_edge = None self.col_maps_node = None self.meta_dict_edge = None self.meta_dict_node = None self.edge_trained_models = None self.node_trained_models = None def _extract_structural_features(self, graphs): structural_features = {} for graph_name, graph_info in graphs.items(): is_hetero = graph_info[MetaData.SRC_NODE_TYPE] != graph_info[MetaData.DST_NODE_TYPE] if is_hetero: offset = graph_info['src_size'] + 10 graph_info[MetaData.STRUCTURE_DATA][:, 1] = graph_info[MetaData.STRUCTURE_DATA][:, 1] + offset edge_list_df = cudf.DataFrame(graph_info[MetaData.STRUCTURE_DATA], columns=["src", "dst"]) graph = get_graph(edge_list_df, src="src", dst="dst").to_undirected() graph_feat_dfs = self.get_features(edge_list_df, graph, src="src", dst="dst") graph_feat_df = merge_dfs(graph_feat_dfs, on="vertex") graph_feat_df = graph_feat_df.fillna(0) if is_hetero: src_nodes = graph_feat_df['vertex'] <= graph_info['src_size'] structural_features[graph_info[MetaData.SRC_NODE_TYPE]] = merge_graph_vertex_feat( structural_features.get(graph_info[MetaData.SRC_NODE_TYPE]), graph_feat_df.loc[src_nodes]) dst_nodes = graph_feat_df['vertex'] > graph_info['src_size'] dst_graph_feat_df = graph_feat_df.loc[dst_nodes] dst_graph_feat_df["vertex"] -= offset structural_features[graph_info[MetaData.DST_NODE_TYPE]] = merge_graph_vertex_feat( structural_features.get(graph_info[MetaData.DST_NODE_TYPE]), dst_graph_feat_df) graph_info[MetaData.STRUCTURE_DATA][:, 1] = graph_info[MetaData.STRUCTURE_DATA][:, 1] - offset else: structural_features[graph_info[MetaData.SRC_NODE_TYPE]] = merge_graph_vertex_feat( structural_features.get(graph_info[MetaData.SRC_NODE_TYPE]), graph_feat_df) for _, df in structural_features.items(): df['vertex'] = df['vertex'].values.astype(int) df.set_index('vertex', inplace=True) return structural_features def fit( self, graphs, node_features, edge_features, **kwargs, ): structural_features = self._extract_structural_features(graphs) self._fit_node(node_features, structural_features) self._fit_edge(edge_features, structural_features, graphs) def _fit_edge( self, edge_features, structural_features, graphs ): self.features_to_correlate_edge = {} self.edge_trained_models = {} self.col_maps_edge = {} self.meta_dict_edge = {} for edge_name, edge_features_data in edge_features.items(): self.features_to_correlate_edge[edge_name] = {} cat_cols = edge_features_data[MetaData.CATEGORICAL_COLUMNS] cont_columns = list(set(edge_features_data[MetaData.FEATURES_LIST]) - set(cat_cols)) for c in cat_cols: 
self.features_to_correlate_edge[edge_name][c] = MetaData.CATEGORICAL for c in cont_columns: self.features_to_correlate_edge[edge_name][c] = MetaData.CONTINUOUS self.meta_dict_edge[edge_name] = defaultdict(None) preproc_dict = get_preproc_dict(self.features_to_correlate_edge[edge_name]) for feat, v in preproc_dict.items(): preproc_fn = get_preproc_fn(v["preproc"]) edge_features_data[MetaData.FEATURES_DATA][feat], meta = \ preproc_fn(edge_features_data[MetaData.FEATURES_DATA][feat]) self.meta_dict_edge[feat] = meta graph_info = graphs[edge_name] edge_list = graph_info[MetaData.STRUCTURE_DATA] src_ids = edge_list[:, 0] dst_ids = edge_list[:, 1] src_struct_feat = structural_features[graph_info[MetaData.SRC_NODE_TYPE]].loc[src_ids].values dst_struct_feat = structural_features[graph_info[MetaData.DST_NODE_TYPE]].loc[dst_ids].values X_train = np.concatenate([src_struct_feat, dst_struct_feat], axis=1).astype(float) self.edge_trained_models[edge_name] = {} self.col_maps_edge[edge_name] = {} edge_features_df = cudf.DataFrame.from_pandas(edge_features_data[MetaData.FEATURES_DATA]) for col_name, col_type in self.features_to_correlate_edge[edge_name].items(): if col_name in self.xgboost_params: xgboost_params = dict(self.xgboost_params[col_name]) else: xgboost_params = dict(self.xgboost_params) y_train = edge_features_df[col_name] if "objective" not in xgboost_params: if col_type == ColumnType.CONTINUOUS: xgboost_params["objective"] = "reg:squarederror" elif col_type == ColumnType.CATEGORICAL: xgboost_params["objective"] = "multi:softmax" vals = edge_features_df[col_name] encoder = LabelEncoder() encoder.fit(vals) self.col_maps_edge[edge_name][col_name] = encoder num_classes = len(encoder.classes_) xgboost_params["num_class"] = num_classes y_train = encoder.transform(y_train) y_train = y_train.values dtrain = xgboost.DMatrix(X_train, y_train) # - train the model trained_model = xgboost.train( xgboost_params, dtrain, num_boost_round=self.num_boost_round, evals=[(dtrain, "train")], verbose_eval=self.verbose, ) self.edge_trained_models[edge_name][col_name] = trained_model def _fit_node( self, node_features, structural_features ): self.features_to_correlate_node = {} self.node_trained_models = {} self.col_maps_node = {} self.meta_dict_node = {} # fit nodes for node_name, node_features_data in node_features.items(): self.features_to_correlate_node[node_name] = {} cat_cols = node_features_data[MetaData.CATEGORICAL_COLUMNS] cont_columns = list(set(node_features_data[MetaData.FEATURES_LIST]) - set(cat_cols)) for c in cat_cols: self.features_to_correlate_node[node_name][c] = MetaData.CATEGORICAL for c in cont_columns: self.features_to_correlate_node[node_name][c] = MetaData.CONTINUOUS self.meta_dict_node[node_name] = defaultdict(None) preproc_dict = get_preproc_dict(self.features_to_correlate_node[node_name]) for feat, v in preproc_dict.items(): preproc_fn = get_preproc_fn(v["preproc"]) node_features_data[MetaData.FEATURES_DATA][feat], meta = \ preproc_fn(node_features_data[MetaData.FEATURES_DATA][feat]) self.meta_dict_node[feat] = meta nodes = structural_features[node_name].index.values.astype(int) node_struct_feat = structural_features[node_name].loc[nodes].values X_train = node_struct_feat.astype(float) self.node_trained_models[node_name] = {} self.col_maps_node[node_name] = {} node_features_df = cudf.DataFrame.from_pandas(node_features_data[MetaData.FEATURES_DATA]) for col_name, col_type in self.features_to_correlate_node[node_name].items(): if col_name in self.xgboost_params: xgboost_params = 
dict(self.xgboost_params[col_name]) else: xgboost_params = dict(self.xgboost_params) y_train = node_features_df[col_name].loc[nodes] if "objective" not in xgboost_params: if col_type == ColumnType.CONTINUOUS: xgboost_params["objective"] = "reg:squarederror" elif col_type == ColumnType.CATEGORICAL: xgboost_params["objective"] = "multi:softmax" vals = node_features_df[col_name].loc[nodes] encoder = LabelEncoder() encoder.fit(vals) self.col_maps_node[node_name][col_name] = encoder num_classes = len(encoder.classes_) xgboost_params["num_class"] = num_classes y_train = encoder.transform(y_train) y_train = y_train.values dtrain = xgboost.DMatrix(X_train, y_train) trained_model = xgboost.train( xgboost_params, dtrain, num_boost_round=self.num_boost_round, evals=[(dtrain, "train")], verbose_eval=self.verbose, ) self.node_trained_models[node_name][col_name] = trained_model def align( self, graphs, node_features, edge_features, ) -> pd.DataFrame: structural_features = self._extract_structural_features(graphs) for k, v in structural_features.items(): structural_features[k] = df_to_pandas(v) res = { MetaData.NODES: {}, MetaData.EDGES: {}, } if self.features_to_correlate_node: res[MetaData.NODES] = self._align( structural_features, node_features, None, self.features_to_correlate_node, self.col_maps_node, self.node_trained_models, MetaData.NODES, ) if self.features_to_correlate_edge: res[MetaData.EDGES] = self._align( structural_features, edge_features, graphs, self.features_to_correlate_edge, self.col_maps_edge, self.edge_trained_models, MetaData.EDGES, ) return res def _align( self, structural_features, tab_features, graphs, features_to_correlate_part, col_maps, trained_models: Dict[str, xgboost.Booster], part: Literal[MetaData.NODES, MetaData.EDGES], ) -> Dict[str, pd.DataFrame]: result_dict = {} for part_name, features_to_correlate in features_to_correlate_part.items(): preproc_dict = get_preproc_dict(features_to_correlate) if part == MetaData.NODES: split_df = structural_features[part_name] elif part == MetaData.EDGES: split_df = graphs[part_name][MetaData.STRUCTURE_DATA] else: raise ValueError(f"Only `{MetaData.NODES}` and `{MetaData.EDGES}` parts expected, got ({part})") topk = min(len(split_df), self.topk) batch_size = self.batch_size if len(split_df) // batch_size == 0: batch_size = len(split_df) chunks = np.array_split(split_df, len(split_df) // batch_size) all_preds = [] for chunk in chunks: if part == MetaData.NODES: node_feat = chunk.values X_test = node_feat.astype(float) dtest = xgboost.DMatrix(X_test) elif part == MetaData.EDGES: src_ids = chunk[:, 0] dst_ids = chunk[:, 1] src_struct_feat = structural_features[graphs[part_name][MetaData.SRC_NODE_TYPE]].loc[src_ids].values dst_struct_feat = structural_features[graphs[part_name][MetaData.DST_NODE_TYPE]].loc[dst_ids].values X_test = np.concatenate([src_struct_feat, dst_struct_feat], axis=1).astype(float) dtest = xgboost.DMatrix(X_test) col_preds = [] for col_name, col_type in features_to_correlate.items(): preds = trained_models[part_name][col_name].predict(dtest) col_preds.append(preds.reshape(-1, 1)) col_preds = np.concatenate(col_preds, axis=1) all_preds.append(col_preds) all_preds = np.concatenate(all_preds, axis=0) all_preds = cupy.asarray(all_preds) target_cols = list(features_to_correlate.keys()) y_generated = [] for col_name, col_type in features_to_correlate.items(): preproc_fn = None if preproc_dict: try: preproc_fn = get_preproc_fn( preproc_dict[col_name]["preproc"] ) except: pass y = tab_features[part_name][col_name] if 
preproc_fn is not None: y, _ = preproc_fn(y) if col_type == ColumnType.CATEGORICAL: y = col_maps[part_name][col_name].inverse_transform(y) y_generated.append(cudf.Series(y)) y_generated = cudf.concat(y_generated, axis=1).values ranks = cupy.zeros((len(split_df), 1)) if len(target_cols) == 1: y_generated = y_generated.reshape(-1) target_col = target_cols[0] col_type = features_to_correlate[target_col] if col_type == ColumnType.CATEGORICAL: all_preds = col_maps[part_name][target_col].inverse_transform( cudf.Series(all_preds) ) all_preds = all_preds.values unique_preds = cupy.unique(all_preds) unique_preds = cupy.asnumpy(unique_preds) unique_generated = cupy.unique(y_generated) present_unique = [ up for up in unique_preds if up in unique_generated ] idxs = cupy.arange(0, len(y_generated)) pred_assigned = cupy.zeros(len(all_preds), dtype="bool") gen_assigned = cupy.zeros(len(y_generated), dtype="bool") unassigned_idxs_pred = [] for up in present_unique: sel_idxs = idxs[y_generated == up] cupy.random.shuffle(sel_idxs) ups_mask = (all_preds == up).squeeze() num_ups = cupy.sum(ups_mask) if len(sel_idxs) > num_ups: r_idxs = sel_idxs[:num_ups] ranks[ups_mask] = r_idxs.reshape(-1, 1) pred_assigned[ups_mask] = True gen_assigned[sel_idxs[:num_ups]] = True else: r_idxs = cupy.where(ups_mask)[0] ra_idxs = r_idxs[: len(sel_idxs)] ranks[ra_idxs] = sel_idxs.reshape(-1, 1) ups_mask[ra_idxs] = False unassigned_idxs = ra_idxs[len(sel_idxs):] unassigned_idxs_pred.append(unassigned_idxs) pred_assigned[ra_idxs] = True gen_assigned[sel_idxs] = True ranks[~pred_assigned] = idxs[~gen_assigned][: cupy.sum(~pred_assigned)].reshape(-1, 1) elif col_type == ColumnType.CONTINUOUS: y_generated = cupy.ravel(y_generated) y_idxsort = cupy.argsort(y_generated) y_generated_sorted = y_generated[y_idxsort] ranking = cupy.searchsorted(y_generated_sorted, all_preds) ranks = y_idxsort[ranking] ranks = spread_ranks(ranks) elif len(target_cols) > 1: y_generated = y_generated / ( cupy.linalg.norm(y_generated, ord=2, axis=1).reshape(-1, 1) ) chunks = cupy.array_split(all_preds, len(all_preds) // batch_size) for idx, chunk in enumerate(chunks): idxs = cupy.ones((len(y_generated),), dtype=bool) chunk = chunk / cupy.linalg.norm(chunk, ord=2, axis=1).reshape( -1, 1 ) sim = cupy.einsum("ij,kj->ik", chunk, y_generated) chunk_ranks = cupy.argsort(sim, axis=1)[:, -topk:] rand_sel = cupy.random.randint(0, topk, len(chunk_ranks)) chunk_ranks = chunk_ranks[ cupy.arange(len(chunk_ranks)), rand_sel ] cupy.put(idxs, chunk_ranks, False) y_generated = y_generated[idxs] ranks[ idx * batch_size: idx * batch_size + len(chunk) ] = chunk_ranks.reshape(-1, 1) ranks[ranks >= len(tab_features[part_name])] = len(tab_features[part_name]) - 1 ranks = cupy.asnumpy(ranks) ranks = ranks.squeeze() features = tab_features[part_name].iloc[ranks].reset_index(drop=True) result_dict[part_name] = features return result_dict def save(self, save_dir: Union[PosixPath, str]): if not os.path.exists(save_dir): os.makedirs(save_dir) if self.edge_trained_models: for edge_name, models in self.edge_trained_models.items(): for col_name, model in models.items(): model.save_model( os.path.join(save_dir, f"{edge_name}___{col_name}___xgb_aligner_edge.json") ) if self.node_trained_models: for node_name, models in self.node_trained_models.items(): for col_name, model in models.items(): model.save_model( os.path.join(save_dir, f"{node_name}___{col_name}___xgb_aligner_node.json") ) meta_data = { "xgboost_params": self.xgboost_params, "num_boost_round": self.num_boost_round, "batch_size": 
self.batch_size, "topk": self.topk, "get_features": self.get_features, "verbose": self.verbose, "fitted_data": { "features_to_correlate_edge": self.features_to_correlate_edge, "features_to_correlate_node": self.features_to_correlate_node, "col_maps_edge": self.col_maps_edge, "col_maps_node": self.col_maps_node, "meta_dict_edge": self.meta_dict_edge, "meta_dict_node": self.meta_dict_node, } } with open(os.path.join(save_dir, "xgb_aligner_meta.pkl"), "wb") as file_handler: pickle.dump(meta_data, file_handler, protocol=pickle.HIGHEST_PROTOCOL) @classmethod def load(cls, dir_path: Union[PosixPath, str]): with open(os.path.join(dir_path, "xgb_aligner_meta.pkl"), "rb") as file_handler: meta_data = pickle.load(file_handler) fitted_data = meta_data.pop('fitted_data') instance = cls(**meta_data) for k, v in fitted_data.items(): setattr(instance, k, v) files = os.listdir(dir_path) edge_files = [f for f in files if "xgb_aligner_edge" in f] instance.edge_trained_models = defaultdict(dict) for ef in edge_files: xgb_model = xgboost.Booster() xgb_model.load_model(os.path.join(dir_path, ef)) edge_name, col_name = ef.split("___")[:2] # - same format as `save` instance.edge_trained_models[edge_name][col_name] = xgb_model node_files = [f for f in files if "xgb_aligner_node" in f] instance.node_trained_models = defaultdict(dict) for nf in node_files: xgb_model = xgboost.Booster() xgb_model.load_model(os.path.join(dir_path, nf)) node_name, col_name = nf.split("___")[:2] # - same format as `save` instance.node_trained_models[node_name][col_name] = xgb_model return instance
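A hedged construction and persistence sketch for XGBoostAligner; fit() and align() are elided because they require the full SynGen graph and feature dictionaries. The import path mirrors the file location shown above.

from syngen.graph_aligner.xgboost_aligner import XGBoostAligner

aligner = XGBoostAligner(
    xgboost_params={"learning_rate": 0.1, "max_depth": 5, "tree_method": "gpu_hist"},
    num_boost_round=10,
    batch_size=100_000,
    topk=4,
)
# aligner.fit(graphs, node_features, edge_features)   # inputs omitted here
aligner.save("/tmp/xgb_aligner")                       # writes xgb_aligner_meta.pkl (plus per-column boosters once fitted)
restored = XGBoostAligner.load("/tmp/xgb_aligner")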
PyTorch/Classification/GPUNet/triton/125ms-D/runner
runner
start_NVIDIA-DGX-A100-(1x-A100-80GB)
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash

# Evaluate Runner
python3 -m "triton.125ms-D.runner.__main__" \
    --config-path "triton/125ms-D/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \
    --device 0
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/deployment/convert
convert
trt
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

config:
  type: trt
PyTorch/SpeechSynthesis/FastPitch/scripts
scripts
download_cmudict
#!/usr/bin/env bash

set -e

: ${CMUDICT_DIR:="cmudict"}

if [ ! -f $CMUDICT_DIR/cmudict-0.7b ]; then
  echo "Downloading cmudict-0.7b ..."
  mkdir -p "$CMUDICT_DIR"  # ensure the target directory exists before wget writes into it
  wget https://github.com/Alexir/CMUdict/raw/master/cmudict-0.7b -qO $CMUDICT_DIR/cmudict-0.7b
fi
TensorFlow2/Detection/Efficientdet
Efficientdet
__init__
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ==============================================================================
PyTorch/LanguageModeling/BERT/bert_configs
bert_configs
large
{
  "attention_probs_dropout_prob": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "max_position_embeddings": 512,
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "type_vocab_size": 2,
  "vocab_size": 30522
}
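Sanity-checking the hyperparameters in this file with plain json; the BERT code in this repository normally consumes it through its own config class, which is not shown here.

import json

with open("bert_configs/large.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]
print(head_dim)   # 64, the per-head dimension for BERT-Large
assert cfg["hidden_size"] % cfg["num_attention_heads"] == 0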
PyTorch/SpeechSynthesis/FastPitch/platform
platform
DGXA100_FastPitch_AMP_1GPU
#!/bin/bash

set -a

: ${NUM_GPUS:=1}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=8}
: ${AMP:=true}

bash scripts/train.sh "$@"
PyTorch/Classification/GPUNet
GPUNet
train
#!/bin/bash
NUM_PROC=$1
shift
python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC train.py "$@"
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts
scripts
run_volatility
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

: ${SEED:=1}
: ${LR:=1e-3}
: ${BATCH_SIZE:=1024}
: ${NGPU:=8}
: ${EPOCHS:=10}

python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \
        --dataset volatility \
        --data_path /data/processed/volatility_bin \
        --batch_size=${BATCH_SIZE} \
        --lr ${LR} \
        --epochs ${EPOCHS} \
        --seed ${SEED} \
        --use_amp \
        --results /results/TFT_volatility_bs${NGPU}x${BATCH_SIZE}_lr${LR}/seed_${SEED}
PyTorch/LanguageModeling/BERT/triton/runner/maintainer/docker/containers
containers
triton_server_container
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib from threading import Thread from typing import Dict, Generator, Union from docker.models.containers import ExecResult from docker.types import DeviceRequest, Ulimit if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ....logger import LOGGER from ...exceptions import ContainerNotStarted from ..container import DockerContainer class TritonServerContainer(DockerContainer): def __init__( self, name: str, command: str, image: str, volumes: Dict, devices: Union[list, int], environment: Dict, log_file: Union[pathlib.Path, str], network: str = "host", shm_size: str = "1G", ): """ Initialize Triton Server Container Args: name: Container name command: Triton Server command to exec on container start image: Docker Image volumes: Volumes to mount inside container devices: Devices which has to be visible in container environment: Environment variables log_file: Path where logs should be saved network: Network mode shm_size: Shared memory size """ super().__init__(name) self._image = image self._command = command self._volumes = volumes self._devices = devices self._environment = environment self._network = network self._shm_size = shm_size self._triton_exec = None self._logging_thread = None self._log_file_path = pathlib.Path(log_file) def start(self) -> None: """ Start Triton Server Container """ devices = [ DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices), ] LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}") LOGGER.info(f"Starting Triton container {self.name}.") self._container = self._docker_client.containers.run( image=self._image, name=self.name, device_requests=devices, detach=True, tty=True, shm_size=self._shm_size, ulimits=[ Ulimit(name="memlock", soft=-1, hard=-1), Ulimit(name="stack", soft=67108864, hard=67108864), ], volumes=self._volumes, environment=self._environment, network_mode=self._network, auto_remove=True, ipc_mode="host", ) LOGGER.info(f"Triton command:") LOGGER.info(f" {self._command}") LOGGER.info(f"Starting Triton Server {self.name}.") self._triton_exec = self._docker_api_client.exec_create( container=self._container.id, cmd=self._command, ) stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True) self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True) self._logging_thread.start() def stop(self) -> None: """ Stop Triton Server Container and save logs to file """ if self._container is not None: triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"]) if triton_result.get("ExitCode") not in (0, None): LOGGER.info( f"Triton Inference Server instance {self.name} failed. 
Exit code: {triton_result.get('ExitCode')}" ) LOGGER.info(f"Stopping triton server {self.name}.") self._container.stop() self._container = None self._docker_client.close() self._docker_api_client.close() def run(self, command: str) -> ExecResult: """ Run command in container Args: command: Command to execute Returns: ExecResult """ if not self._container: raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.") return self._container.exec_run(command) def _logging(self, generator: Generator) -> None: """Triton logging thread for Triton Inference Server Args: generator (string generator): Triton log stream. """ with open(self._log_file_path, mode="w") as file: try: while True: log = next(generator) txt = log.decode("utf-8") file.write(txt) except StopIteration: LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
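A hedged usage sketch for TritonServerContainer; the image tag, command, and host paths below are placeholders rather than values taken from the runner configuration, and the import path is assumed from the file location shown above.

from triton.runner.maintainer.docker.containers.triton_server_container import TritonServerContainer

container = TritonServerContainer(
    name="triton-server",
    command="tritonserver --model-repository=/models",
    image="nvcr.io/nvidia/tritonserver:21.10-py3",          # placeholder tag
    volumes={"/home/user/model_repository": {"bind": "/models", "mode": "rw"}},
    devices=["0"],
    environment={"NVIDIA_VISIBLE_DEVICES": "0"},
    log_file="triton_server.log",
)
container.start()
# ... run inference or perf tests against the server here ...
container.stop()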
PyTorch/Detection/Efficientdet/effdet
effdet
bench
""" PyTorch EfficientDet support benches Hacked together by Ross Wightman """ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from utils.model_ema import ModelEma from .anchors import Anchors, AnchorLabeler, generate_detections, MAX_DETECTION_POINTS from .loss import DetectionLoss def _post_process(config, cls_outputs, box_outputs): """Selects top-k predictions. Post-proc code adapted from Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet and optimized for PyTorch. Args: config: a parameter dictionary that includes `min_level`, `max_level`, `batch_size`, and `num_classes`. cls_outputs: an OrderDict with keys representing levels and values representing logits in [batch_size, height, width, num_anchors]. box_outputs: an OrderDict with keys representing levels and values representing box regression targets in [batch_size, height, width, num_anchors * 4]. """ batch_size = cls_outputs[0].shape[0] if config.fused_focal_loss: batch_size, channels, _, _ = cls_outputs[0].shape padded_classes = (config.num_classes + 7) // 8 * 8 anchors = channels // padded_classes _cls_outputs_all = [] for level in range(config.num_levels): _, _, height, width = cls_outputs[level].shape _cls_output = cls_outputs[level].permute(0, 2, 3, 1) _cls_output = _cls_output.view(batch_size, height, width, anchors, padded_classes) _cls_output = _cls_output[..., :config.num_classes] _cls_output = _cls_output.reshape([batch_size, -1, config.num_classes]) _cls_outputs_all.append(_cls_output) cls_outputs_all = torch.cat(_cls_outputs_all, 1) else: cls_outputs_all = torch.cat([ cls_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, config.num_classes]) for level in range(config.num_levels)], 1) box_outputs_all = torch.cat([ box_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, 4]) for level in range(config.num_levels)], 1) _, cls_topk_indices_all = torch.topk(cls_outputs_all.reshape(batch_size, -1), dim=1, k=MAX_DETECTION_POINTS, sorted=False) indices_all = cls_topk_indices_all // config.num_classes classes_all = cls_topk_indices_all % config.num_classes box_outputs_all_after_topk = torch.gather( box_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, 4)) cls_outputs_all_after_topk = torch.gather( cls_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, config.num_classes)) cls_outputs_all_after_topk = torch.gather( cls_outputs_all_after_topk, 2, classes_all.unsqueeze(2)) return cls_outputs_all_after_topk, box_outputs_all_after_topk, indices_all, classes_all def _batch_detection(batch_size: int, class_out, box_out, anchor_boxes, indices, classes, img_scale, img_size, soft_nms: bool = False): batch_detections = [] # FIXME we may be able to do this as a batch with some tensor reshaping/indexing, PR welcome for i in range(batch_size): detections = generate_detections( class_out[i], box_out[i], anchor_boxes, indices[i], classes[i], img_scale[i], img_size[i], soft_nms=soft_nms) 
batch_detections.append(detections) return torch.stack(batch_detections, dim=0) class DetBenchPredict(nn.Module): def __init__(self, model, config, soft_nms=False): super(DetBenchPredict, self).__init__() self.config = config self.model = model self.soft_nms = soft_nms self.anchors = Anchors( config.min_level, config.max_level, config.num_scales, config.aspect_ratios, config.anchor_scale, config.image_size) def forward(self, x, img_scales, img_size): class_out, box_out = self.model(x) class_out, box_out, indices, classes = _post_process(self.config, class_out, box_out) return _batch_detection( x.shape[0], class_out, box_out, self.anchors.boxes, indices, classes, img_scales, img_size, self.soft_nms) class DetBenchTrain(nn.Module): def __init__(self, model, config): super(DetBenchTrain, self).__init__() self.config = config self.model = model self.anchors = Anchors( config.min_level, config.max_level, config.num_scales, config.aspect_ratios, config.anchor_scale, config.image_size) self.loss_fn = DetectionLoss(self.config) def forward(self, x, target): class_out, box_out = self.model(x) loss, class_loss, box_loss = self.loss_fn(class_out, box_out, target, target['num_positives']) output = dict(loss=loss, class_loss=class_loss, box_loss=box_loss) if not self.training: # if eval mode, output detections for evaluation class_out, box_out, indices, classes = _post_process(self.config, class_out, box_out) output['detections'] = _batch_detection( x.shape[0], class_out, box_out, self.anchors.boxes, indices, classes, target['img_scale'], target['img_size']) return output def unwrap_bench(model): # Unwrap a model in support bench so that various other fns can access the weights and attribs of the # underlying model directly if isinstance(model, ModelEma): # unwrap ModelEma return unwrap_bench(model.ema) elif hasattr(model, 'module'): # unwrap DDP return unwrap_bench(model.module) elif hasattr(model, 'model'): # unwrap Bench -> model return unwrap_bench(model.model) else: return model
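For readers tracing the index arithmetic in `_post_process`, here is a minimal, self-contained sketch (toy shapes, not part of the repository) of the same flatten-then-top-k pattern used to recover anchor indices and class ids before gathering the matching box regressions:

# Illustrative sketch only: mirrors the flatten / topk / div-mod / gather steps
# of _post_process with small toy tensors instead of EfficientDet outputs.
import torch

batch_size, num_anchors, num_classes, k = 2, 10, 3, 4
cls_outputs_all = torch.randn(batch_size, num_anchors, num_classes)
box_outputs_all = torch.randn(batch_size, num_anchors, 4)

# Flatten the (anchor, class) axes and take the global top-k scores per image.
_, topk_idx = torch.topk(cls_outputs_all.reshape(batch_size, -1), k=k, dim=1, sorted=False)
anchor_idx = topk_idx // num_classes  # which anchor each selected score came from
class_idx = topk_idx % num_classes    # which class that score corresponds to

# Gather the box regressions for the selected anchors, as the bench does with
# MAX_DETECTION_POINTS in place of k.
boxes_topk = torch.gather(box_outputs_all, 1, anchor_idx.unsqueeze(2).expand(-1, -1, 4))
print(boxes_topk.shape)  # torch.Size([2, 4, 4])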
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2
tacotron2
cmudict
""" from https://github.com/keithito/tacotron """ import re valid_symbols = [ 'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2', 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2', 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY', 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW', 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH' ] _valid_symbol_set = set(valid_symbols) class CMUDict: '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict''' def __init__(self, file_or_path, keep_ambiguous=True): if isinstance(file_or_path, str): with open(file_or_path, encoding='latin-1') as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if not keep_ambiguous: entries = {word: pron for word, pron in entries.items() if len(pron) == 1} self._entries = entries def __len__(self): return len(self._entries) def lookup(self, word): '''Returns list of ARPAbet pronunciations of the given word.''' return self._entries.get(word.upper()) _alt_re = re.compile(r'\([0-9]+\)') def _parse_cmudict(file): cmudict = {} for line in file: if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"): parts = line.split(' ') word = re.sub(_alt_re, '', parts[0]) pronunciation = _get_pronunciation(parts[1]) if pronunciation: if word in cmudict: cmudict[word].append(pronunciation) else: cmudict[word] = [pronunciation] return cmudict def _get_pronunciation(s): parts = s.strip().split(' ') for part in parts: if part not in _valid_symbol_set: return None return ' '.join(parts)
PyTorch/SpeechSynthesis/FastPitch/platform
platform
DGXA100_FastPitch_TF32_1GPU
#!/bin/bash

set -a

: ${NUM_GPUS:=1}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=8}
: ${AMP:=false}

bash scripts/train.sh "$@"
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/data_loading
data_loading
qm9
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES # SPDX-License-Identifier: MIT from typing import Tuple import dgl import pathlib import torch from dgl.data import QM9EdgeDataset from dgl import DGLGraph from torch import Tensor from torch.utils.data import random_split, DataLoader, Dataset from tqdm import tqdm from se3_transformer.data_loading.data_module import DataModule from se3_transformer.model.basis import get_basis from se3_transformer.runtime.utils import get_local_rank, str2bool, using_tensor_cores def _get_relative_pos(qm9_graph: DGLGraph) -> Tensor: x = qm9_graph.ndata['pos'] src, dst = qm9_graph.edges() rel_pos = x[dst] - x[src] return rel_pos def _get_split_sizes(full_dataset: Dataset) -> Tuple[int, int, int]: len_full = len(full_dataset) len_train = 100_000 len_test = int(0.1 * len_full) len_val = len_full - len_train - len_test return len_train, len_val, len_test class QM9DataModule(DataModule): """ Datamodule wrapping https://docs.dgl.ai/en/latest/api/python/dgl.data.html#qm9edge-dataset Training set is 100k molecules. Test set is 10% of the dataset. Validation set is the rest. This includes all the molecules from QM9 except the ones that are uncharacterized. 
""" NODE_FEATURE_DIM = 6 EDGE_FEATURE_DIM = 4 def __init__(self, data_dir: pathlib.Path, task: str = 'homo', batch_size: int = 240, num_workers: int = 8, num_degrees: int = 4, amp: bool = False, precompute_bases: bool = False, **kwargs): self.data_dir = data_dir # This needs to be before __init__ so that prepare_data has access to it super().__init__(batch_size=batch_size, num_workers=num_workers, collate_fn=self._collate) self.amp = amp self.task = task self.batch_size = batch_size self.num_degrees = num_degrees qm9_kwargs = dict(label_keys=[self.task], verbose=False, raw_dir=str(data_dir)) if precompute_bases: bases_kwargs = dict(max_degree=num_degrees - 1, use_pad_trick=using_tensor_cores(amp), amp=amp) full_dataset = CachedBasesQM9EdgeDataset(bases_kwargs=bases_kwargs, batch_size=batch_size, num_workers=num_workers, **qm9_kwargs) else: full_dataset = QM9EdgeDataset(**qm9_kwargs) self.ds_train, self.ds_val, self.ds_test = random_split(full_dataset, _get_split_sizes(full_dataset), generator=torch.Generator().manual_seed(0)) train_targets = full_dataset.targets[self.ds_train.indices, full_dataset.label_keys[0]] self.targets_mean = train_targets.mean() self.targets_std = train_targets.std() def prepare_data(self): # Download the QM9 preprocessed data QM9EdgeDataset(verbose=True, raw_dir=str(self.data_dir)) def _collate(self, samples): graphs, y, *bases = map(list, zip(*samples)) batched_graph = dgl.batch(graphs) edge_feats = {'0': batched_graph.edata['edge_attr'][:, :self.EDGE_FEATURE_DIM, None]} batched_graph.edata['rel_pos'] = _get_relative_pos(batched_graph) # get node features node_feats = {'0': batched_graph.ndata['attr'][:, :self.NODE_FEATURE_DIM, None]} targets = (torch.cat(y) - self.targets_mean) / self.targets_std if bases: # collate bases all_bases = { key: torch.cat([b[key] for b in bases[0]], dim=0) for key in bases[0][0].keys() } return batched_graph, node_feats, edge_feats, all_bases, targets else: return batched_graph, node_feats, edge_feats, targets @staticmethod def add_argparse_args(parent_parser): parser = parent_parser.add_argument_group("QM9 dataset") parser.add_argument('--task', type=str, default='homo', const='homo', nargs='?', choices=['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv', 'U0_atom', 'U_atom', 'H_atom', 'G_atom', 'A', 'B', 'C'], help='Regression task to train on') parser.add_argument('--precompute_bases', type=str2bool, nargs='?', const=True, default=False, help='Precompute bases at the beginning of the script during dataset initialization,' ' instead of computing them at the beginning of each forward pass.') return parent_parser def __repr__(self): return f'QM9({self.task})' class CachedBasesQM9EdgeDataset(QM9EdgeDataset): """ Dataset extending the QM9 dataset from DGL with precomputed (cached in RAM) pairwise bases """ def __init__(self, bases_kwargs: dict, batch_size: int, num_workers: int, *args, **kwargs): """ :param bases_kwargs: Arguments to feed the bases computation function :param batch_size: Batch size to use when iterating over the dataset for computing bases """ self.bases_kwargs = bases_kwargs self.batch_size = batch_size self.bases = None self.num_workers = num_workers super().__init__(*args, **kwargs) def load(self): super().load() # Iterate through the dataset and compute bases (pairwise only) # Potential improvement: use multi-GPU and gather dataloader = DataLoader(self, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, collate_fn=lambda samples: dgl.batch([sample[0] for sample in 
samples])) bases = [] for i, graph in tqdm(enumerate(dataloader), total=len(dataloader), desc='Precomputing QM9 bases', disable=get_local_rank() != 0): rel_pos = _get_relative_pos(graph) # Compute the bases with the GPU but convert the result to CPU to store in RAM bases.append({k: v.cpu() for k, v in get_basis(rel_pos.cuda(), **self.bases_kwargs).items()}) self.bases = bases # Assign at the end so that __getitem__ isn't confused def __getitem__(self, idx: int): graph, label = super().__getitem__(idx) if self.bases: bases_idx = idx // self.batch_size bases_cumsum_idx = self.ne_cumsum[idx] - self.ne_cumsum[bases_idx * self.batch_size] bases_cumsum_next_idx = self.ne_cumsum[idx + 1] - self.ne_cumsum[bases_idx * self.batch_size] return graph, label, {key: basis[bases_cumsum_idx:bases_cumsum_next_idx] for key, basis in self.bases[bases_idx].items()} else: return graph, label
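The split sizes computed by `_get_split_sizes` and the target standardization applied in `_collate` can be summarized with a small stand-alone sketch; the dataset size below is approximate and the target tensor is synthetic:

# Illustrative only: QM9EdgeDataset holds roughly 130,831 molecules after the
# uncharacterized ones are removed; the exact number comes from DGL at runtime.
len_full = 130_831
len_train = 100_000
len_test = int(0.1 * len_full)             # ~13,083 molecules
len_val = len_full - len_train - len_test  # the remainder is used for validation

import torch

targets = torch.randn(8) * 5 + 2        # stand-in for raw regression targets
mean, std = targets.mean(), targets.std()
standardized = (targets - mean) / std   # what the model is trained against
recovered = standardized * std + mean   # map predictions back to original units
assert torch.allclose(recovered, targets)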
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda
cuda
generate_mask_targets
/** * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <torch/extension.h> #include <iostream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <math.h> #include <algorithm> #include <stdlib.h> #include "cpu/vision.h" /*rle cuda kernels are cuda version of the corresponding cpu functions here https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c these are only a subset of rle kernels.*/ typedef unsigned int uint; typedef unsigned long siz; typedef unsigned char byte; //6144 is based on minimum shared memory size per SM //across all pytorch-supported GPUs. Need to use blocking //to avoid this restriction const int BUFFER_SIZE=6144; const int CNTS_SIZE=6144; __global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx, int poly_count, int anchor_count, float4 *anchor_data, int mask_size){ int tid = threadIdx.x; int block_jump = blockDim.x; int poly_id = blockIdx.x; int anchor_idx; for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){ if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break; } float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x; float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y; w = fmaxf(w, 1.0f); h = fmaxf(h, 1.0f); float ratio_h = ((float) mask_size) / h; float ratio_w = ((float) mask_size) / w; int poly_ptr_idx_start = poly_rel_idx[poly_id]; int poly_ptr_idx_end = poly_rel_idx[poly_id + 1]; double *poly_data_buf = dense_poly_data + poly_ptr_idx_start; int len = poly_ptr_idx_end - poly_ptr_idx_start; for (int j = tid; j < len; j += block_jump){ if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x); if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y); } } //merging masks happens on mask format, not RLE format. 
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size, int *per_anchor_poly_idx, int anchor_count){ int anchor_idx = blockIdx.x; int tid = threadIdx.x; int jump_block = blockDim.x; int mask_start_idx = per_anchor_poly_idx[anchor_idx]; int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx]; for(int j = tid; j < mask_size * mask_size; j += jump_block){ int transposed_pixel = (j % mask_size) * mask_size + j / mask_size; byte pixel = 0; for(int k = 0; k < num_of_masks_to_merge; k++){ if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1; if (pixel == 1) break; } masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel; } } /*cuda version of rleDecode function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; int m = num_of_cnts[poly_id]; uint *cnts_buf = cnts + CNTS_SIZE * poly_id; byte *mask_ptr = mask + poly_id * h * w; __shared__ uint shbuf1[CNTS_SIZE]; __shared__ uint shbuf2[CNTS_SIZE]; //initialize shbuf for scan. first element is 0 (exclusive scan) for (long i = tid; i < CNTS_SIZE; i += block_jump){ shbuf1[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; shbuf2[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; } __syncthreads(); //double buffering for scan int switch_buf = 0; for (int offset = 1; offset <= m; offset *= 2){ switch_buf = 1 - switch_buf; if(switch_buf == 0){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } }else if (switch_buf == 1){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1; //find which bin pixel j falls into , which determines the pixel value //use binary search for(int j = tid; j < h * w; j += block_jump){ int bin = 0; int min_idx = 0; int max_idx = m; int mid_idx = m / 2; while(max_idx > min_idx){ if(j > scanned_buf[mid_idx]) { min_idx = mid_idx+1; mid_idx = (min_idx + max_idx) / 2; } else if (j < scanned_buf[mid_idx]) { max_idx = mid_idx; mid_idx = (min_idx + max_idx) / 2; } else { mid_idx++; break; } } int k = mid_idx; byte pixel = k % 2 == 0 ? 
1 : 0; mask_ptr[j] = pixel; } } /*cuda version of rleFrPoly function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w, uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in, uint *b_in, int *num_of_cnts) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; long cnts_offset = poly_id * CNTS_SIZE; long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2; const double *xy = dense_coordinates + poly_rel_idx[poly_id]; int *x = x_in + poly_id * BUFFER_SIZE; int *y = y_in + poly_id * BUFFER_SIZE; int *u = u_in + poly_id * BUFFER_SIZE; int *v = v_in + poly_id * BUFFER_SIZE; uint *a = a_in + poly_id * BUFFER_SIZE; uint *b = b_in + poly_id * BUFFER_SIZE; /* upsample and get discrete points densely along entire boundary */ long j, m = 0; double scale = 5; __shared__ int shbuf1[BUFFER_SIZE]; __shared__ int shbuf2[BUFFER_SIZE]; for(long j = tid; j < BUFFER_SIZE; j += block_jump) { shbuf1[j] = 0; shbuf2[j] = 0; } for(long j = tid; j <= k; j += block_jump) x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5)); for(long j = tid; j <= k; j += block_jump) y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5)); __syncthreads(); for(int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = abs(xe - xs); dy = abs(ys - ye); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; dist = dx >= dy ? dx + 1 : dy + 1; shbuf1[j + 1] = dist; shbuf2[j + 1] = dist; } __syncthreads(); //block-wide exclusive prefix scan int switch_buf = 0; for (int offset = 1; offset <= k; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = __sad(xe, xs, 0); dy = __sad(ys, ye, 0); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; m = switch_buf == 0 ? shbuf2[j] : shbuf1[j]; if (dx >= dy) for (d = 0; d <= dx; d++) { /*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for double precision multiply, which we observe produces different results than CPU occasionally. To force flush-to-zero=False, we use __dmul_rn intrinsics function */ t = flip ? dx - d : d; u[m] = t + xs; v[m] = (int) (ys + __dmul_rn(s, t) + .5); m++; } else for (d = 0; d <= dy; d++) { t = flip ? dy - d : d; v[m] = t + ys; u[m] = (int) (xs + __dmul_rn(s, t) + .5); m++; } } __syncthreads(); m = switch_buf == 0 ? shbuf2[k] : shbuf1[k]; int k2 = m; __syncthreads(); double xd, yd; if (tid == 0) { shbuf1[tid] = 0; shbuf2[tid] = 0; } /* get points along y-boundary and downsample */ for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if (u[j] != u[j - 1]){ xd = (double) (u[j] < u[j-1] ? 
u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1 ) { shbuf1[j] = 0; shbuf2[j] = 0; continue; } yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); shbuf1[j] = 1; shbuf2[j] = 1; } else { shbuf1[j] = 0; shbuf2[j] = 0; } } } __syncthreads(); //exclusive prefix scan switch_buf = 0; for (int offset = 1; offset < k2; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if(u[j] != u[j - 1]){ xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;} yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1]; x[m] = (int) xd; y[m] = (int) yd; m++; } } } __syncthreads(); /* compute rle encoding given y-boundary points */ m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1]; int k3 = m; for (int j = tid; j <= k3; j += block_jump){ if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]); else a[j] = (uint)(h * w); } k3++; __syncthreads(); //run brick sort on a for k3+1 element //load k3+1 elements of a into shared memory for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j]; __syncthreads(); uint a_temp; for (int r = 0; r <= k3 / 2; r++){ int evenCas = k3 / 2; int oddCas = (k3 - 1) / 2; //start with 0, need (k3+1)/2 CAS for (int j = tid; j < evenCas; j += block_jump){ if (shbuf1[2 * j] > shbuf1[2 * j + 1]){ a_temp = shbuf1[2 * j]; shbuf1[2 * j]=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = a_temp; } } __syncthreads(); //start with 1 for (int j = tid; j < oddCas; j += block_jump){ if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){ a_temp=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = shbuf1[2 * j + 2]; shbuf1[2 * j + 2]=a_temp; } } __syncthreads(); } for(long j = tid; j < k3; j += block_jump) { if(j>0) shbuf2[j] = shbuf1[j - 1]; else shbuf2[j] = 0; } __syncthreads(); for(int j = tid; j < k3; j += block_jump){ shbuf1[j] -= shbuf2[j]; } __syncthreads(); uint *cnts_buf = cnts + cnts_offset; if (tid == 0){ j = m = 0; cnts_buf[m++] = shbuf1[j++]; while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else { j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; } num_of_cnts[poly_id] = m; } __syncthreads(); } at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size){ const int M = mask_size; assert (M < 32); //if M >=32, shared memory buffer size may not be //sufficient. 
Need to fix this by blocking float *d_anchor_data = anchors.data_ptr<float>(); int num_of_anchors = anchors.size(0); auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, at::CPU(at::kInt)); int num_of_poly = 0; for (int i = 0; i < num_of_anchors; i++){ *(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly; num_of_poly += polygons[i].size(); } *(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly; auto poly_rel_idx = at::empty({num_of_poly + 1}, at::CPU(at::kInt)); double *dense_poly_data = dense_vector.data_ptr<double>(); int start_idx = 0; int poly_count = 0; for(int i = 0; i < polygons.size(); i++){ for(int j=0; j < polygons[i].size(); j++) { *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; start_idx += polygons[i][j].size(0); poly_count++; } } *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte)); auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat)); at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt)); auto d_dense_vector = dense_vector.cuda(); auto d_per_anchor_poly_idx = per_anchor_poly_idx.cuda(); auto d_poly_rel_idx = poly_rel_idx.cuda(); auto stream = at::cuda::getCurrentCUDAStream(); crop_and_scale_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_per_anchor_poly_idx.data_ptr<int>(), d_poly_rel_idx.data_ptr<int>(), poly_count, num_of_anchors, (float4*) d_anchor_data, M); //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem, //and occupancy is likely shmem capacity bound rle_fr_poly_cuda_kernel<<<num_of_poly, 1024, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_poly_rel_idx.data_ptr<int>(), M, M, (uint*) d_cnts_t.data_ptr<int>(), d_x_t.data_ptr<int>(), d_y_t.data_ptr<int>(), d_u_t.data_ptr<int>(), d_v_t.data_ptr<int>(), (uint*) d_a_t.data_ptr<int>(), (uint*) d_b_t.data_ptr<int>(), d_num_of_counts_t.data_ptr<int>()); decode_rle_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_num_of_counts_t.data_ptr<int>(), (uint*) d_cnts_t.data_ptr<int>(), M, M, d_mask_t.data_ptr<byte>()); merge_masks_cuda_kernel<<<num_of_anchors, 256, 0, stream.stream()>>>(d_mask_t.data_ptr<byte>(), result.data_ptr<float>(), M, d_per_anchor_poly_idx.data_ptr<int>(), num_of_anchors); return result; }
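As a readability aid, here is a CPU-side NumPy sketch of the run-length decoding that decode_rle_cuda_kernel parallelizes. It follows the COCO convention of alternating background/foreground runs stored in column-major order and is illustrative only, not a drop-in replacement for the kernel:

# Reference implementation of COCO-style RLE decoding (illustrative sketch).
import numpy as np

def decode_rle(counts, h, w):
    mask = np.zeros(h * w, dtype=np.uint8)
    pos, value = 0, 0                  # runs start with background (0)
    for run in counts:
        mask[pos:pos + run] = value
        pos += run
        value = 1 - value              # runs alternate 0/1
    return mask.reshape((w, h)).T      # counts are column-major, as in the COCO API

# Example: a 4x4 mask whose middle two columns are foreground.
print(decode_rle([4, 8, 4], 4, 4))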
TensorFlow/Classification/ConvNets/utils/hooks
hooks
__init__
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from utils.hooks.training_hooks import *
from utils.hooks.benchmark_hooks import *
from utils.hooks.prefill_hook import *
TensorFlow/Detection/SSD/models/research/object_detection/models
models
faster_rcnn_inception_resnet_v2_feature_extractor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inception Resnet v2 Faster R-CNN implementation. See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261) as well as "Speed/accuracy trade-offs for modern convolutional object detectors" by Huang et al. (https://arxiv.org/abs/1611.10012) """ import tensorflow as tf from object_detection.meta_architectures import faster_rcnn_meta_arch from nets import inception_resnet_v2 slim = tf.contrib.slim class FasterRCNNInceptionResnetV2FeatureExtractor( faster_rcnn_meta_arch.FasterRCNNFeatureExtractor): """Faster R-CNN with Inception Resnet v2 feature extractor implementation.""" def __init__(self, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0): """Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. batch_norm_trainable: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16. """ if first_stage_features_stride != 8 and first_stage_features_stride != 16: raise ValueError('`first_stage_features_stride` must be 8 or 16.') super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__( is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay) def preprocess(self, resized_inputs): """Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def _extract_proposal_features(self, preprocessed_inputs, scope): """Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation. 
""" if len(preprocessed_inputs.get_shape().as_list()) != 4: raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a ' 'tensor of shape %s' % preprocessed_inputs.get_shape()) with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights) as scope: return inception_resnet_v2.inception_resnet_v2_base( preprocessed_inputs, final_endpoint='PreAuxLogits', scope=scope, output_stride=self._first_stage_features_stride, align_feature_maps=True) def _extract_box_classifier_features(self, proposal_feature_maps, scope): """Extracts second stage box classifier features. This function reconstructs the "second half" of the Inception ResNet v2 network after the part defined in `_extract_proposal_features`. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal. """ with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights): with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope( weight_decay=self._weight_decay)): # Forces is_training to False to disable batch norm update. with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): with tf.variable_scope('Mixed_7a'): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv_1 = slim.conv2d( tower_conv, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): tower_conv1 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d( tower_conv1, 288, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): tower_conv2 = slim.conv2d( proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1') tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3, scope='Conv2d_0b_3x3') tower_conv2_2 = slim.conv2d( tower_conv2_1, 320, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_3'): tower_pool = slim.max_pool2d( proposal_feature_maps, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3') net = tf.concat( [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3) net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20) net = inception_resnet_v2.block8(net, activation_fn=None) proposal_classifier_features = slim.conv2d( net, 1536, 1, scope='Conv2d_7b_1x1') return proposal_classifier_features def restore_from_classification_checkpoint_fn( self, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope): """Returns a map of variables to load from a foreign checkpoint. Note that this overrides the default implementation in faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for InceptionResnetV2 checkpoints. TODO(jonathanhuang,rathodv): revisit whether it's possible to force the `Repeat` namescope as created in `_extract_box_classifier_features` to start counting at 2 (e.g. 
`Repeat_2`) so that the default restore_fn can be used. Args: first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ variables_to_restore = {} for variable in tf.global_variables(): if variable.op.name.startswith( first_stage_feature_extractor_scope): var_name = variable.op.name.replace( first_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable if variable.op.name.startswith( second_stage_feature_extractor_scope): var_name = variable.op.name.replace( second_stage_feature_extractor_scope + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2') var_name = var_name.replace( second_stage_feature_extractor_scope + '/', '') variables_to_restore[var_name] = variable return variables_to_restore
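A quick numeric illustration of the `preprocess` mapping above; NumPy is used here purely for the arithmetic:

# Pixel values in [0, 255] are mapped linearly onto [-1, 1].
import numpy as np

resized_inputs = np.array([0.0, 127.5, 255.0], dtype=np.float32)
preprocessed = (2.0 / 255.0) * resized_inputs - 1.0
print(preprocessed)  # [-1.  0.  1.]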