import logging

import torch
from pytorch_lightning import Callback, Trainer, LightningModule

log = logging.getLogger(__name__)


def l2_promote():
    """Increase the maximum L2 fetch granularity to 128 bytes via the CUDA runtime."""
    import ctypes

    _libcudart = ctypes.CDLL('libcudart.so')
    # Buffer that cudaDeviceGetLimit writes the current limit into.
    pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
    # Limit 0x05 is cudaLimitMaxL2FetchGranularity; set it to 128 bytes and verify.
    _libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
    _libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
    assert pValue.contents.value == 128


def set_affinity(trainer):
    """Set CPU core affinity for this process based on its local GPU rank."""
    try:
        from src.utils.gpu_affinity import set_affinity as set_gpu_affinity

        nproc_per_node = torch.cuda.device_count()
        affinity = set_gpu_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
        log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
    except Exception:
        # Affinity is an optimization; never fail training because of it.
        log.warning(f'{trainer.local_rank}: could not set thread affinity', exc_info=True)
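

# Illustration only (not part of the original module): a simplified sketch of
# what a GPU-affinity helper typically does on Linux. It splits the visible CPU
# cores evenly across the local ranks and pins the current process to its slice.
# The real `src.utils.gpu_affinity.set_affinity` used above is socket/NUMA aware
# ('socket_unique_continuous'); this naive version is not.
def _naive_affinity_sketch(local_rank: int, nproc_per_node: int) -> set:
    import os

    cpus = sorted(os.sched_getaffinity(0))          # cores currently visible (Linux only)
    per_rank = max(1, len(cpus) // nproc_per_node)  # even, contiguous split across local ranks
    chunk = cpus[local_rank * per_rank:(local_rank + 1) * per_rank] or cpus
    os.sched_setaffinity(0, chunk)                  # pin the current process to its slice
    return os.sched_getaffinity(0)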


class GpuAffinity(Callback):
    """Set GPU affinity and increase the L2 fetch granularity.

    Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
    """

    def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
        set_affinity(trainer)
        l2_promote()  # also raise the L2 fetch granularity, as described in the class docstring
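

# Usage sketch (assumption, not taken from the original file): the callback is
# registered like any other Lightning callback, and `setup` then runs once per
# process at the start of fit/validate/test. `model` stands for an existing
# LightningModule defined elsewhere.
#
#     trainer = Trainer(accelerator="gpu", devices=-1, callbacks=[GpuAffinity()])
#     trainer.fit(model)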