Dataset columns:
  library         stringclasses (1 value)
  test_file       stringclasses (785 values)
  test_function   stringlengths (1 to 295)
  before          stringlengths (0 to 448k)
  after           stringlengths (0 to 487k)
  context_before  stringclasses (947 values)
  context_after   stringlengths (0 to 16.3k)
  commit_before   stringclasses (1 value)
  commit_after    stringclasses (1 value)
  change_type     stringclasses (3 values)
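Each row pairs a test function's source before and after a commit with the surrounding file context, the two commit hashes, and the change type. As a rough illustration of how a table with this schema could be consumed, here is a minimal sketch using the Hugging Face datasets library; the repo id "org/test-function-changes" and the split name are placeholders, since the actual Hub location is not given here.

# Minimal sketch: load a dataset with this schema and inspect a few rows.
# The repo id "org/test-function-changes" is an assumed placeholder, not the
# real Hub name of this dataset.
from datasets import load_dataset

ds = load_dataset("org/test-function-changes", split="train")

# Keep only rows that describe newly added NCCL tests.
added_nccl = ds.filter(
    lambda row: row["change_type"] == "added"
    and row["test_file"].endswith("test_c10d_nccl.py")
)

# Print each selected test function name and the size of its new source.
for row in added_nccl.select(range(min(3, len(added_nccl)))):
    print(row["test_function"], len(row["after"]))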
torch
test/distributed/test_c10d_nccl.py
test_timeout_dumps_on_stuck_ranks
def test_timeout_dumps_on_stuck_ranks(self):
    # need rank0 to crash quicker after detecting timeout
    os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
    # restore this env var to its prior default in case another test changed it
    os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"

    if self.rank == self.MAIN_PROCESS_RANK:
        # wait for both rank0 and 1 to crash before looking for both ranks' output
        # file, and we rely on rank1 to sleep long enough to dump the debug info.
        self.assertEqual(self._wait_process(0, timeout=90), -6)
        self.assertEqual(self._wait_process(1, timeout=90), -6)
        self.assertTrue(os.path.exists(self._trace_name(rank=1)))
        self.assertTrue(os.path.exists(self._trace_name(rank=0)))
        with open(self._trace_name(rank=0), "rb") as f:
            t = pickle.load(f)
            t = t["entries"]
            self.assertEqual(len(t), 2)
        with open(self._trace_name(rank=1), "rb") as f:
            t = pickle.load(f)
            t = t["entries"]
            self.assertEqual(len(t), 1)
            self.assertEqual(t[0]["collective_seq_id"], 1)
            self.assertEqual(t[0]["state"], "completed")
        return

    pg = self._create_process_group_nccl()
    device = self.local_device
    with torch.cuda.device(device):
        a = torch.full((3, 4), float(self.rank), device=device)

        pg.allreduce(a).wait()
        if self.rank == 0:
            pg.allreduce(a).wait()

        # rank 0 will get stuck, timeout and then signal a timeout to all ranks.
        torch.cuda.synchronize(device=device)

        if self.rank == 1:
            # Force rank 1 to idle so that it will eventually timeout as well after
            # getting the global signal to dump the debugging info.
            time.sleep(600)
import copy import json import os import pickle import random import re import signal import sys import tempfile import threading import time import warnings from contextlib import contextmanager from datetime import datetime, timedelta from enum import auto, Enum from itertools import chain, product from unittest import mock, SkipTest import torch import torch.distributed as c10d from typing import Dict, List import test_c10d_common from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch._C._distributed_c10d import OpType from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( get_timeout, init_multigpu_helper, MultiProcessTestCase, requires_gloo, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, TEST_SKIPS, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, TestCase, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11) or torch.version.hip is not None ) from torch.distributed.distributed_c10d import _get_process_group_uid from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater @skip_but_pass_in_sandcastle class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_nccl.py
_wait_process
def _wait_process(self, rank, timeout):
    try:
        self.processes[rank].join(timeout)
        return self.processes[rank].exitcode
    except TimeoutError:
        return None
import copy import json import os import pickle import random import re import signal import sys import tempfile import threading import time import warnings from contextlib import contextmanager from datetime import datetime, timedelta from enum import auto, Enum from itertools import chain, product from unittest import mock, SkipTest import torch import torch.distributed as c10d from typing import Dict, List import test_c10d_common from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch._C._distributed_c10d import OpType from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( get_timeout, init_multigpu_helper, MultiProcessTestCase, requires_gloo, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, TEST_SKIPS, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, TestCase, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11) or torch.version.hip is not None ) from torch.distributed.distributed_c10d import _get_process_group_uid from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_nccl.py
_check_return_codes
def _check_return_codes(self, elapsed_time):
    # the base test infra assumes processes exit with matching return codes,
    # but we want rank0 to abort and rank1 to exit cleanly in this test
    self.assertEqual(self.processes[0].exitcode, -6)
    self.assertEqual(self.processes[1].exitcode, 0)
import copy import json import os import pickle import random import re import signal import sys import tempfile import threading import time import warnings from contextlib import contextmanager from datetime import datetime, timedelta from enum import auto, Enum from itertools import chain, product from unittest import mock, SkipTest import torch import torch.distributed as c10d from typing import Dict, List import test_c10d_common from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch._C._distributed_c10d import OpType from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( get_timeout, init_multigpu_helper, MultiProcessTestCase, requires_gloo, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, TEST_SKIPS, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, TestCase, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11) or torch.version.hip is not None ) from torch.distributed.distributed_c10d import _get_process_group_uid from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_nccl.py
test_nccl_errors_dump
def test_nccl_errors_dump(self):
    os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
    os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
    os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
    # need rank0 to dump before abort
    os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "5"

    if self.rank == self.MAIN_PROCESS_RANK:
        # wait for both rank0 and 1 to crash before looking for dump
        self.assertEqual(self._wait_process(0, timeout=90), -6)
        self.assertEqual(self._wait_process(1, timeout=90), 1)
        # verify that the trace file exists for rank0
        self.assertTrue(os.path.exists(self._trace_name(rank=0)))
        return

    store = c10d.FileStore(self.file_name, self.world_size)
    process_group = c10d.ProcessGroupNCCL(
        store,
        self.rank,
        self.world_size,
        timeout=timedelta(seconds=10),
    )
    process_group.allreduce(torch.rand(10).cuda(self.rank))
    if self.rank == 0:
        work = process_group.allreduce(torch.rand(10).cuda(self.rank))
        # expect an error to be raised
        with self.assertRaisesRegex(dist.DistBackendError, ""):
            # Block the current stream on the NCCL stream
            work.wait()
            # Run some GPU operations
            a = torch.rand(10).cuda(self.rank)
    elif self.rank == 1:
        # Clean up structures (ex: files for FileStore before going down)
        del process_group
        sys.exit(1)


# tests that needs to be run with a larger world size
import copy import json import os import pickle import random import re import signal import sys import tempfile import threading import time import warnings from contextlib import contextmanager from datetime import datetime, timedelta from enum import auto, Enum from itertools import chain, product from unittest import mock, SkipTest import torch import torch.distributed as c10d from typing import Dict, List import test_c10d_common from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch._C._distributed_c10d import OpType from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( get_timeout, init_multigpu_helper, MultiProcessTestCase, requires_gloo, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, TEST_SKIPS, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, TestCase, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11) or torch.version.hip is not None ) from torch.distributed.distributed_c10d import _get_process_group_uid from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater @skip_but_pass_in_sandcastle class NcclErrorDumpTest(NCCLTraceTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_nccl.py
_create_process_group_nccl
def _create_process_group_nccl(self, store, opts):
    # create nccl processgroup with opts
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        pg_options=opts)
    pg = c10d.distributed_c10d._get_default_group()
    return pg
def _create_process_group_nccl(self, store, opts, device_id=None):
    # create nccl processgroup with opts
    c10d.init_process_group(
        "nccl",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
        pg_options=opts,
        device_id=device_id,
    )
    pg = c10d.distributed_c10d._get_default_group()
    return pg
import copy import math import os import random import signal import sys import tempfile import threading import time from contextlib import contextmanager from datetime import timedelta from itertools import product from unittest import mock import torch import torch.distributed as c10d import test_c10d_common import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, init_multigpu_helper, requires_nccl, requires_gloo, requires_nccl_version, skip_if_lt_x_gpu, get_timeout, skip_if_rocm, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( TestCase, run_tests, retry_on_connect_failures, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, sandcastle_skip, sandcastle_skip_if, ) BFLOAT16_AVAILABLE = ( torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11) or torch.version.hip is not None ) ) class ProcessGroupNCCLTest(MultiProcessTestCase):
import copy import json import os import pickle import random import re import signal import sys import tempfile import threading import time import warnings from contextlib import contextmanager from datetime import datetime, timedelta from enum import auto, Enum from itertools import chain, product from unittest import mock, SkipTest import torch import torch.distributed as c10d from typing import Dict, List import test_c10d_common from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook import torch.distributed as dist import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch._C._distributed_c10d import OpType from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( get_timeout, init_multigpu_helper, MultiProcessTestCase, requires_gloo, requires_nccl, requires_nccl_version, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, TEST_SKIPS, with_dist_debug_levels, with_nccl_blocking_wait, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_CUDA, TEST_WITH_DEV_DBG_ASAN, TEST_WITH_ROCM, TestCase, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( (torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11) or torch.version.hip is not None ) class ProcessGroupNCCLGroupTest(MultiProcessTestCase): from torch.distributed.distributed_c10d import _get_process_group_uid from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_c10d_ops_nccl.py
test_tensor_register_hook
def test_tensor_register_hook(self):
    os.environ["TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"] = "1"

    pg = self.pg
    local_device_id = self.rank_to_GPU[self.rank][0]

    def allgather_base(output_t, input_t):
        work = pg._allgather_base(output_t, input_t)
        work.wait()

    # allgather_base is GPU number agnostic.
    # Each rank contribute one tensor regardless of GPU counts
    tensor = torch.tensor([self.rank]).cuda(local_device_id)
    output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
        local_device_id
    )

    allgather_base(output_t, tensor)

    # Verification
    self.assertEqual(torch.arange(self.world_size), output_t)

    # Unset env
    del os.environ["TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"]
import math import os import sys import tempfile import torch import torch.distributed as c10d import torch.distributed as dist from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( init_multigpu_helper, MultiProcContinousTest, requires_nccl, ) from torch.testing._internal.common_utils import ( skip_but_pass_in_sandcastle_if, skipIfRocm, TEST_WITH_DEV_DBG_ASAN, ) class ProcessGroupNCCLOpTest(MultiProcContinousTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ops_nccl.py
allgather_base
def allgather_base(output_t, input_t):
    work = pg._allgather_base(output_t, input_t)
    work.wait()

# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
    local_device_id
)

allgather_base(output_t, tensor)

# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
import math import os import sys import tempfile import torch import torch.distributed as c10d import torch.distributed as dist from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_distributed import ( init_multigpu_helper, MultiProcContinousTest, requires_nccl, ) from torch.testing._internal.common_utils import ( skip_but_pass_in_sandcastle_if, skipIfRocm, TEST_WITH_DEV_DBG_ASAN, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_barrier_implies_wait
def test_barrier_implies_wait(self):
    pg = self._create_process_group_ucc()

    # Kick off allreduce operations
    size = (100, 100)
    num = 16
    tensors = [torch.full(size, float(i)) for i in range(num)]
    for tensor in tensors:
        # Note: leak the returned work handle
        pg.allreduce(tensor)

    # Barrier should ensure all previous work has completed
    pg.barrier().get_future().wait()

    for i, tensor in enumerate(tensors):
        self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
setUp
def setUp(self):
    super().setUp()
    self._spawn_processes()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
_get_process_group
def _get_process_group(self):
    store = self._get_store()
    c10d.init_process_group(
        "ucc", store=store, rank=self.rank, world_size=self.world_size
    )
    return c10d.distributed_c10d._get_default_group()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_ucc_backend_4gpu_module
def test_ucc_backend_4gpu_module(self):
    int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
    devices = [torch.device("cuda:" + str(i)) for i in int_devices]
    self._test_ucc_backend(devices, None, multi_device=True)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_empty_tensors
def test_empty_tensors(self):
    pg = self._create_process_group_ucc()

    xs = [torch.FloatTensor([])]
    fut = pg.broadcast(xs).get_future()
    fut.wait()
    output = fut.value()
    self.assertEqual(0, output[0].numel())
    self.assertEqual(xs[0], output[0], exact_dtype=False)

# TODO: add error check testing
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
broadcast
def broadcast(xs, rootRank, rootTensor):
    opts = c10d.BroadcastOptions()
    opts.rootRank = rootRank
    opts.rootTensor = rootTensor
    fut = pg.broadcast(xs, opts).get_future()
    fut.wait()
    return fut.value()

# Every rank is root once
for i in range(self.world_size):
    # Run with 1 input tensor
    x = fn(torch.tensor([self.rank]))
    output = broadcast([x], i, 0)
    self.assertEqual(torch.tensor([i]), output[0], exact_dtype=False)

    # TODO: UCC currently does not support multi tensor input

# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
fut = pg.broadcast(x, root=0).get_future()
fut.wait()
result = fut.value()
self.assertEqual(torch.tensor([1.0]), result[0])
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_broadcast_basics
def test_broadcast_basics(self):
    self._test_broadcast_basics(lambda t: t.clone())

# TODO: test_broadcast_basics_cuda times out locally
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_allreduce_basics
def test_allreduce_basics(self):
    self._test_allreduce_basics(lambda t: t.clone())

# TODO: test_allreduce_basics_cuda times out locally
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
_test_allgather_basics
def _test_allgather_basics(self, fn):
    pg = self._create_process_group_ucc()

    # TODO: Run with N input tensor per rank; for now, UCC only supports single tensor input so N=1
    for n in [1]:
        input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
        output = [
            [fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
            for _ in range(n)
        ]
        expected_output = [
            [fn(torch.tensor([i])) for i in range(n * self.world_size)]
            for _ in range(n)
        ]
        fut = pg.allgather(output, input).get_future()
        fut.wait()
        result = fut.value()
        if n == 1:
            result = [result]
        self.assertEqual(expected_output, result)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_allgather_basics
def test_allgather_basics(self):
    self._test_allgather_basics(lambda t: t.clone())
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
task_parameters
def task_parameters(self):
    return (self.t0.p, self.t1.p, self.task_unused.p)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
forward
def forward(self, x, rank):
    return self.t0(x) if rank == 0 else self.t1(x)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
task_parameters
def task_parameters(self):
    return (self.t0.p, self.t1.p, self.task_unused.p)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
forward
def forward(self, x, rank):
    return self.t0(x) if rank == 0 else self.t1(x)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
simple_reduce_tests
def simple_reduce_tests(rank, world_size): tests = [ ( c10d.ReduceOp.SUM, torch.tensor([rank + 1.0]), torch.tensor([float(world_size * (world_size + 1) / 2)]), ), ( c10d.ReduceOp.PRODUCT, torch.tensor([rank + 1.0]), torch.tensor([float(math.factorial(world_size))]), ), ( c10d.ReduceOp.MIN, torch.tensor([rank + 1.0]), torch.tensor([1.0]), ), ( c10d.ReduceOp.MAX, torch.tensor([rank + 1.0]), torch.tensor([world_size]), ), ] # Generate tests for BAND. # The bit that is set changes in every iteration to check # that the output changes accordingly. for i in range(4): vin = rank | (1 << i) vout = 1 << i tests.append( ( c10d.ReduceOp.BAND, torch.tensor([vin], dtype=torch.int32), torch.tensor([vout], dtype=torch.int32), ), ) # Generate tests for BOR. # These emulate a larger world size per iteration by having every # rank contribute multiple values that are pre-OR'ed. for i in range(1, 5): vin = reduce(operator.or_, [rank * i + j for j in range(i)]) vout = reduce(operator.or_, range(world_size * i)) tests.append( ( c10d.ReduceOp.BOR, torch.tensor([vin], dtype=torch.int32), torch.tensor([vout], dtype=torch.int32), ), ) # Generate tests for XOR. # These emulate a larger world size per iteration by having every # rank contribute multiple values that are pre-XOR'ed. for i in range(1, 5): vin = reduce(operator.xor, [rank * i + j for j in range(i)]) vout = reduce(operator.xor, range(world_size * i)) tests.append( ( c10d.ReduceOp.BXOR, torch.tensor([vin], dtype=torch.int32), torch.tensor([vout], dtype=torch.int32), ), ) return tests class RendezvousEnvTest(TestCase): @requires_ucc() @retry_on_connect_failures def test_logging_init(self): os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "127.0.0.1" os.environ["MASTER_PORT"] = str(common.find_free_port()) os.environ["RANK"] = "0" previous_handlers = logging.root.handlers c10d.init_process_group(backend="ucc", init_method="env://") current_handlers = logging.root.handlers self.assertEqual(len(previous_handlers), len(current_handlers)) for current, previous in zip(current_handlers, previous_handlers): self.assertEqual(current, previous) c10d.destroy_process_group() class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase): @requires_ucc() @retry_on_connect_failures def test_default_store_timeout_ucc(self): self._test_default_store_timeout("ucc") class ProcessGroupUCCTest(MultiProcessTestCase): def _create_process_group_ucc(self): store = c10d.FileStore(self.file_name, self.world_size) return c10d.ProcessGroupUCC(store, self.rank, self.world_size) def setUp(self): super().setUp() self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass @requires_ucc() def test_empty_tensors(self): pg = self._create_process_group_ucc() xs = [torch.FloatTensor([])] fut = pg.broadcast(xs).get_future() fut.wait() output = fut.value() self.assertEqual(0, output[0].numel()) self.assertEqual(xs[0], output[0], exact_dtype=False) # TODO: add error check testing def _test_broadcast_basics(self, fn): pg = self._create_process_group_ucc() def broadcast(xs, rootRank, rootTensor): opts = c10d.BroadcastOptions() opts.rootRank = rootRank opts.rootTensor = rootTensor fut = pg.broadcast(xs, opts).get_future() fut.wait() return fut.value() # Every rank is root once for i in range(self.world_size): # Run with 1 input tensor x = fn(torch.tensor([self.rank])) output = broadcast([x], i, 0) self.assertEqual(torch.tensor([i]), output[0], exact_dtype=False) # TODO: UCC currently does not support multi tensor input # Test 
overloaded convenience function x = torch.tensor([self.rank + 1.0]) fut = pg.broadcast(x, root=0).get_future() fut.wait() result = fut.value() self.assertEqual(torch.tensor([1.0]), result[0]) @requires_ucc() def test_broadcast_basics(self): self._test_broadcast_basics(lambda t: t.clone()) # TODO: test_broadcast_basics_cuda times out locally def _test_allreduce_basics(self, fn): pg = self._create_process_group_ucc() # Single input tests tests = simple_reduce_tests(self.rank, self.world_size) for op, input, expected in tests: opts = c10d.AllreduceOptions() opts.reduceOp = op tensor = fn(input) fut = pg.allreduce([tensor], opts).get_future() fut.wait() result = fut.value() self.assertEqual(expected, result[0], exact_dtype=False) # TODO: UCC currently does not support multi tensor input # Test overloaded convenience function (defaults to using sum) x = fn(torch.tensor([self.rank + 1.0])) fut = pg.allreduce(x).get_future() fut.wait() result = fut.value() self.assertEqual( torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]), result[0], ) @requires_ucc() def test_allreduce_basics(self): self._test_allreduce_basics(lambda t: t.clone()) # TODO: test_allreduce_basics_cuda times out locally def _test_allgather_basics(self, fn): pg = self._create_process_group_ucc() # TODO: Run with N input tensor per rank; for now, UCC only supports single tensor input so N=1 for n in [1]: input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)] output = [ [fn(torch.tensor([-1])) for _ in range(n * self.world_size)] for _ in range(n) ] expected_output = [ [fn(torch.tensor([i])) for i in range(n * self.world_size)] for _ in range(n) ] fut = pg.allgather(output, input).get_future() fut.wait() result = fut.value() if n == 1: result = [result] self.assertEqual(expected_output, result) def test_allgather_basics(self): self._test_allgather_basics(lambda t: t.clone()) def _test_reduce_basics(self, fn): pg = self._create_process_group_ucc() for op, input, output in simple_reduce_tests(self.rank, self.world_size): for root in range(self.world_size): opts = c10d.ReduceOptions() opts.reduceOp = op opts.rootRank = root tmp = fn(input) fut = pg.reduce([tmp], opts).get_future() fut.wait() result = fut.value() if root == self.rank: self.assertEqual(output, result[0], exact_dtype=False) @requires_ucc() def test_reduce_basics(self): self._test_reduce_basics(lambda t: t.clone()) # TODO: test_reduce_basics_cuda times out locally @requires_ucc() def test_send_recv_all_to_all(self): pg = self._create_process_group_ucc() # Preallocate tensors for input/output inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)] outputs = [torch.tensor([-1]) for _ in range(self.world_size)] # Issue sends send_work = [] for i in range(self.world_size): if i == self.rank: continue send_work.append(pg.send([inputs[i]], i, 0)) # Issue recvs recv_work = [] for i in range(self.world_size): if i == self.rank: continue recv_work.append(pg.recv([outputs[i]], i, 0)) # Wait for sends to complete for work in send_work: work.wait() self.assertTrue(work.is_completed()) # Wait for recvs to complete for work in recv_work: work.wait() self.assertTrue(work.is_completed()) # Test that every output other than our own contains the respective rank for i in range(self.world_size): if i == self.rank: continue self.assertEqual(torch.tensor([i]), outputs[i]) # TODO: test_barrier_implies_wait fails with numerical mismatch, will investigate later @skip_but_pass_in_sandcastle("fails with numerical mismatch, skip for now") @requires_ucc() def 
test_barrier_implies_wait(self): pg = self._create_process_group_ucc() # Kick off allreduce operations size = (100, 100) num = 16 tensors = [torch.full(size, float(i)) for i in range(num)] for tensor in tensors: # Note: leak the returned work handle pg.allreduce(tensor) # Barrier should ensure all previous work has completed pg.barrier().get_future().wait() for i, tensor in enumerate(tensors): self.assertEqual(torch.full(size, float(i * self.world_size)), tensor) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module): def __init__(self) -> None: super().__init__() self.t0 = Task() self.t1 = Task() self.task_unused = Task() def task_parameters(self): return (self.t0.p, self.t1.p, self.task_unused.p) def forward(self, x, rank): return self.t0(x) if rank == 0 else self.t1(x) def run_and_verify_grad(model): # Run forward output = model(8, self.rank) # The grads of all parameters should be None at this point. 
t0_p, t1_p, task_unused_p = model.module.task_parameters() self.assertIsNone(t0_p.grad) self.assertIsNone(t1_p.grad) self.assertIsNone(task_unused_p.grad) # Run backward output.mean().backward() # Now locally unused parameter should have grad updated on all ranks. # However the globally unused parameter should still have None grad. self.assertIsNotNone(t0_p.grad) self.assertIsNotNone(t1_p.grad) self.assertIsNone(task_unused_p.grad) process_group = self._get_process_group() # Test on CPU cpu_model = DistributedDataParallel( GlobalLocalUnusedParamModule().cpu(), process_group=process_group, find_unused_parameters=True, gradient_as_bucket_view=gradient_as_bucket_view, static_graph=static_graph, ) run_and_verify_grad(cpu_model) # Test on GPU device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( GlobalLocalUnusedParamModule().to(device_id), device_ids=[device_id], process_group=process_group, find_unused_parameters=True, gradient_as_bucket_view=gradient_as_bucket_view, static_graph=static_graph, ) run_and_verify_grad(gpu_model) # TODO: times out @skip_but_pass_in_sandcastle("times out") @requires_ucc() @skip_if_lt_x_gpu(2) def test_global_local_unused_params_grad(self): self._test_global_local_unused_params_grad() # TODO: times out @skip_but_pass_in_sandcastle("times out") @requires_ucc() @skip_if_lt_x_gpu(2) def test_global_local_unused_params_grad_with_grad_is_view(self): self._test_global_local_unused_params_grad(gradient_as_bucket_view=True) # TODO: times out @skip_but_pass_in_sandcastle("times out") @requires_ucc() @skip_if_lt_x_gpu(2) def test_global_local_unused_params_grad_with_static_graph(self): self._test_global_local_unused_params_grad(static_graph=True) # TODO: times out @skip_but_pass_in_sandcastle("times out") @requires_ucc() @skip_if_lt_x_gpu(2) def test_find_unused_parameters_when_unused_parameters_empty(self): """ An empty unused_parameters array does not imply find_unused_parameters = false. This test makes sure that DDP allreduces unused parameters accordingly where the forward pass in some process uses all parameters. This unit test creates a module that uses all parameters in rank = 0, and has unused parameters in other ranks. """ class FindUnusedParamModule(nn.Module): def __init__(self) -> None: super().__init__() self.t0 = Task() self.t1 = Task() def task_parameters(self): return (self.t0.p, self.t1.p) def forward(self, x, rank): return self.t1(self.t0(x)) if rank == 0 else self.t1(x) def run_and_verify_grad(model): # Run forward output = model(8, self.rank) # The grads of all parameters should be None at this point. [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()] # Run backward output.mean().backward() # Now locally unused parameter should have grad updated on all ranks. [self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()] process_group = self._get_process_group() # Test on CPU cpu_model = DistributedDataParallel( FindUnusedParamModule().cpu(), process_group=process_group, find_unused_parameters=True, ) run_and_verify_grad(cpu_model) # Test on GPU device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( FindUnusedParamModule().to(device_id), device_ids=[device_id], process_group=process_group, find_unused_parameters=True, ) run_and_verify_grad(gpu_model) @requires_ucc() def test_ignored_output(self): """ Test that the output of a model can be ignored and that there is no implicit requirement that `backward` gets called. 
""" process_group = self._get_process_group() class IgnoredOutput(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) model = DistributedDataParallel( IgnoredOutput().float(), process_group=process_group, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]) # Run a few iterations where we ignore the output. for _ in range(4): output = model(input) del output # Run a few iterations where we use the output. for _ in range(4): output = model(input) loss = criterion(output, target) loss.backward() @requires_ucc() def test_ignored_output_with_unused_parameters(self): """ Test that the output of a model can be ignored and that there is no implicit requirement that `backward` gets called, if not all model parameters participated in computing the model output. """ process_group = self._get_process_group() class IgnoredOutputWithUnusedParameters(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) model = DistributedDataParallel( IgnoredOutputWithUnusedParameters().float(), process_group=process_group, find_unused_parameters=True, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]) # Run a few iterations where we ignore the output. for _ in range(4): output = model(input) del output # Run a few iterations where we use the output. 
for _ in range(4): output = model(input) loss = criterion(output, target) loss.backward() def _run_and_verify_sparse_gradients(self, vanilla_model, ddp_model): mult = 2 batch_size = mult * self.world_size criterion = nn.CrossEntropyLoss() input = torch.randint(0, 10, [batch_size, 2]) target = torch.randint(0, 10, [batch_size]) # Run with entire batch against single process version criterion(vanilla_model(input), target).backward() # Run with partial batch against multi process version partial_input = input.split(mult)[self.rank] partial_target = target.split(mult)[self.rank] criterion(ddp_model(partial_input), partial_target).backward() # Check that the gradients are sparse and identical vanilla_parameter = next(vanilla_model.parameters()) ddp_parameter = next(ddp_model.parameters()) self.assertEqual( vanilla_parameter.grad.coalesce(), ddp_parameter.grad.coalesce() ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_save_load_checkpoint(self): dist.init_process_group( "ucc", init_method=f"file://{self.file_name}", world_size=self.world_size, rank=self.rank, ) class TestModel(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) def train_loop(model, optimizer, iterations): for _ in range(iterations): optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() device_id = gpus_for_rank(self.world_size)[self.rank][0] model_withload = TestModel().float().to(device_id) model_withoutload = TestModel().float().to(device_id) ddp_withload = DistributedDataParallel( model_withload, device_ids=[device_id], ) ddp_withoutload = DistributedDataParallel( model_withoutload, device_ids=[device_id], ) # ensure that all the three models start with the same set of parameters. By default they are randomized on construction for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() for p in model_withload.parameters(): with torch.no_grad(): p.zero_() for p in ddp_withoutload.parameters(): with torch.no_grad(): p.zero_() batch_size = 4 criterion = nn.CrossEntropyLoss() optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001) optimizer_non_ddp_withload = torch.optim.SGD( model_withload.parameters(), lr=0.001 ) optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001) input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # run the model for 6 iterations, with a checkpoint in the middle train_loop(ddp_withload, optimizer_withload, 3) # zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict checkpoint_path = tempfile.gettempdir() + "/model.checkpoint" if self.rank == 0: torch.save(ddp_withload.state_dict(), checkpoint_path) dist.barrier() map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} ddp_state_dict = torch.load(checkpoint_path, map_location=map_location) for model in [ddp_withload, model_withload]: for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() ddp_withload.load_state_dict(ddp_state_dict) # the non-DDP model needs to first remove the prefix of "module." from the DDP state dict torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( ddp_state_dict, "module." 
) model_withload.load_state_dict(ddp_state_dict) train_loop(ddp_withload, optimizer_withload, 3) train_loop(model_withload, optimizer_non_ddp_withload, 3) # re-run the model with the same inputs for 6 iterations with no checkpoint train_loop(ddp_withoutload, optimizer_withoutload, 6) for p_withload, p_withoutload, p_non_ddp_withload in zip( ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters(), ): self.assertEqual(p_withload, p_withoutload) self.assertEqual(p_non_ddp_withload, p_withoutload) def _test_sparse_gradients(self, gradient_as_bucket_view=False): process_group = self._get_process_group() # Ensure initialized weights and inputs are identical across processes torch.manual_seed(1337) vanilla_model = SparseGradientModule() ddp_model = DistributedDataParallel( copy.deepcopy(vanilla_model), process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) self._run_and_verify_sparse_gradients(vanilla_model, ddp_model) # TODO: backward pass: input tensor has to be dense @skip_but_pass_in_sandcastle("backward pass: input tensor has to be dense") @requires_ucc() def test_sparse_gradients(self): self._test_sparse_gradients() # TODO: backward pass: input tensor has to be dense @skip_but_pass_in_sandcastle("backward pass: input tensor has to be dense") @requires_ucc() def test_sparse_gradients_grad_is_view(self): self._test_sparse_gradients(gradient_as_bucket_view=True) @requires_ucc() def test_ddp_comm_hook_future_passing_cpu(self): """ This unit test verifies whether the Future object is passed properly. The callback function creates a Future object and sets a value to it. """ process_group = self._get_process_group() # Test on CPU cpu_model = DistributedDataParallel( ModuleForDdpCommHook().cpu(), process_group=process_group ) # Register DDP Communication Hook cpu_model.register_comm_hook(None, self._simple_hook) # check whether the grads are equal to what then callback returns. # without the comm_hook, result would be 0.25 * torch.ones(2, 2). self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2)) def _gpu_model_with_ddp_comm_hook( self, process_group, hook=None, gradient_as_bucket_view=False, state=None ): device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( ModuleForDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group, gradient_as_bucket_view=gradient_as_bucket_view, ) # Register a DDP communication hook if any. if hook is not None: gpu_model.register_comm_hook(state, hook) return gpu_model @requires_ucc() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_future_passing_gpu_ucc(self): """ This unit test verifies whether the Future object is passed properly using ucc backend. The hook callback function creates a Future object and sets a value to it. """ process_group = self._get_process_group() # Get GPU model with simple_hook registered. gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook) # check whether the grads are equal to what simple_hook's then callback returns. # without the comm_hook, result would be 0.25 * torch.ones(2, 2). self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2)) @requires_ucc() def test_ddp_invalid_comm_hook_init(self): """ This unit test makes sure that register_comm_hook properly checks the format of hook defined by user. The Python hook must be callable. This test also checks whether bucket annotation checked properly if defined. 
""" process_group = self._get_process_group() model = DistributedDataParallel( ModuleForDdpCommHook(), process_group=process_group ) with self.assertRaisesRegex(TypeError, "Communication hook must be callable."): model.register_comm_hook(state=None, hook=1) with self.assertRaisesRegex( ValueError, "bucket annotation should be dist.GradBucket." ): def comm_hook( state: object, bucket: int ) -> torch.futures.Future[torch.Tensor]: return torch.futures.Future() model.register_comm_hook(state=None, hook=comm_hook) @requires_ucc() def test_ddp_invalid_comm_hook_return_type(self): """ This test checks whether return annotation checked properly if defined. It also checks whether an internal error is thrown if return type is incorrect and user hasn't specified any return type annotation. """ process_group = self._get_process_group() model = DistributedDataParallel( ModuleForDdpCommHook(), process_group=process_group ) expected_err = ( "Communication hook: return annotation should be torch.futures.Future" ) with self.assertRaisesRegex( ValueError, expected_err, ): def comm_hook(state: object, bucket: dist.GradBucket) -> int: return torch.futures.Future() model.register_comm_hook(state=None, hook=comm_hook) verify_ddp_error_logged(model, expected_err) with self.assertRaisesRegex( RuntimeError, "callback must return a torch.futures.Future object, but got", ): def comm_hook(state: object, bucket: dist.GradBucket): return 1 model.register_comm_hook(state=None, hook=comm_hook) # Run forward output = model(8, self.rank) # Run backward output.mean().backward() @requires_ucc() def test_ddp_comm_hook_register_just_once(self): """ DDP communication hook can only be registered once. This test validates whether the error is thrown properly when register_comm_hook is called more than once. """ process_group = self._get_process_group() model = DistributedDataParallel( ModuleForDdpCommHook(), process_group=process_group ) def dummy_hook(state, bucket): fut = torch.futures.Future() fut.set_result([bucket.buffer()]) return fut model.register_comm_hook(None, dummy_hook) with self.assertRaisesRegex( RuntimeError, "register_comm_hook or register_builtin_comm_hook can only be called once.", ): model.register_comm_hook(None, dummy_hook) # TODO: backward pass: input tensor must be dense @skip_but_pass_in_sandcastle("backward pass: input tensor has to be dense") @requires_ucc() def test_ddp_comm_hook_sparse_gradients(self): """ Runs "test_sparse_gradients" unit test with DDP communication hook. We define a simple hook that does allreduce and works with ucc backend for this test. """ process_group = self._get_process_group() # Ensure initialized weights and inputs are identical across processes torch.manual_seed(1337) vanilla_model = SparseGradientModule() ddp_model = DistributedDataParallel( copy.deepcopy(vanilla_model), process_group=process_group, ) def allreduce_hook_ucc( state: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: def div_by_world_size(fut): # Divide the result by 2 * world_size. return fut.wait()[0] / self.world_size # Prepare allreduced grad bucket tensors by running an async work. 
fut = process_group.allreduce([bucket.buffer()]).get_future() return fut.then(div_by_world_size) ddp_model.register_comm_hook(None, allreduce_hook_ucc) self._run_and_verify_sparse_gradients(vanilla_model, ddp_model) class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase): @property def device(self): return "cpu" def setUp(self): super().setUp() self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass @requires_ucc() @skip_if_lt_x_gpu(2) def test_sequence_num_set_default_pg_ucc(self): self._test_sequence_num_set_default_pg(backend="ucc") @requires_ucc() @skip_if_lt_x_gpu(2) def test_sequence_num_set_ucc_new_group(self): self._test_sequence_num_set_new_group(backend="ucc") @skip_if_lt_x_gpu(2) @requires_ucc() def test_sequence_num_incremented_ucc_default(self): self._test_sequence_num_incremented_default_group("ucc") @skip_if_lt_x_gpu(4) @requires_ucc() def test_sequence_num_incremented_ucc_subgroup(self): if self.world_size < 4: return skip_but_pass_in_sandcastle("Test requires world_size of at least 4") self._test_sequence_num_incremented_subgroup("ucc") @skip_but_pass_in_sandcastle("Fails on M60") @requires_ucc() def test_ucc_barrier_device_ids(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="ucc", rank=self.rank, world_size=self.world_size, store=store ) with self.assertRaisesRegex(RuntimeError, "device_ids not supported"): c10d.barrier(device_ids=[self.rank]) @skip_but_pass_in_sandcastle("Fails on M60") @skip_if_lt_x_gpu(2) @requires_ucc() def test_ucc_warn_not_in_group(self): self._test_warn_not_in_group(backend="ucc") @skip_if_lt_x_gpu(2) @requires_ucc() def test_ucc_rank_membership(self): self._test_rank_membership(backend="ucc") @skip_if_lt_x_gpu(2) @requires_ucc() def test_tensor_dtype_mismatch(self): self._test_tensor_dtype_mismatch(backend="ucc") @skip_if_lt_x_gpu(2) @requires_ucc() def test_tensor_dtype_complex(self): self._test_tensor_dtype_complex(backend="ucc") class UccProcessGroupWithDispatchedCollectivesTests( test_c10d_common.ProcessGroupWithDispatchedCollectivesTests ): @skip_but_pass_in_sandcastle("Fails on M60") @requires_ucc() @skip_if_lt_x_gpu(1) def test_collectives(self): # includes reduce, broadcast, all_reduce, all_gather, reduce_scatter, barrier, all_to_all, scatter self._test_collectives(backend="ucc") @skip_but_pass_in_sandcastle("Fails on M60") @requires_ucc() @skip_if_lt_x_gpu(1) def test_allgather_base(self): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( "ucc", world_size=self.world_size, rank=self.rank, store=store, ) device = "cuda" tensor = torch.ones(10, 10, device=torch.device(device)) output_tensor = torch.zeros(10, 10, device=torch.device(device)) dist.all_gather_into_tensor(output_tensor, tensor) self.assertEqual(output_tensor, tensor) if __name__ == "__main__": assert ( not torch.cuda._initialized ), "test_distributed must not have initialized CUDA context on main process" run_tests()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_logging_init
def test_logging_init(self): os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "127.0.0.1" os.environ["MASTER_PORT"] = str(common.find_free_port()) os.environ["RANK"] = "0" previous_handlers = logging.root.handlers c10d.init_process_group(backend="ucc", init_method="env://") current_handlers = logging.root.handlers self.assertEqual(len(previous_handlers), len(current_handlers)) for current, previous in zip(current_handlers, previous_handlers): self.assertEqual(current, previous) c10d.destroy_process_group()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class RendezvousEnvTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_default_store_timeout_ucc
def test_default_store_timeout_ucc(self): self._test_default_store_timeout("ucc")
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
_create_process_group_ucc
def _create_process_group_ucc(self): store = c10d.FileStore(self.file_name, self.world_size) return c10d.ProcessGroupUCC(store, self.rank, self.world_size)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
setUp
def setUp(self): super().setUp() self._spawn_processes()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
forward
def forward(self, x, rank): return self.t0(x) if rank == 0 else self.t1(x)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_ignored_output_with_unused_parameters
def test_ignored_output_with_unused_parameters(self): """ Test that the output of a model can be ignored and that there is no implicit requirement that `backward` gets called, if not all model parameters participated in computing the model output. """ process_group = self._get_process_group() class IgnoredOutputWithUnusedParameters(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.fc3 = nn.Linear(4, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) model = DistributedDataParallel( IgnoredOutputWithUnusedParameters().float(), process_group=process_group, find_unused_parameters=True, ) batch_size = 4 criterion = nn.CrossEntropyLoss() input = torch.rand([batch_size, 2], dtype=torch.float) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]) # Run a few iterations where we ignore the output. for _ in range(4): output = model(input) del output # Run a few iterations where we use the output. for _ in range(4): output = model(input) loss = criterion(output, target) loss.backward()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
forward
def forward(self, x, rank): return self.t0(x) if rank == 0 else self.t1(x)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_save_load_checkpoint
def test_save_load_checkpoint(self): dist.init_process_group( "ucc", init_method=f"file://{self.file_name}", world_size=self.world_size, rank=self.rank, ) class TestModel(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = nn.Linear(10, 4, bias=False) self.relu = nn.ReLU() def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) return F.softmax(x, dim=1) def train_loop(model, optimizer, iterations): for _ in range(iterations): optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() device_id = gpus_for_rank(self.world_size)[self.rank][0] model_withload = TestModel().float().to(device_id) model_withoutload = TestModel().float().to(device_id) ddp_withload = DistributedDataParallel( model_withload, device_ids=[device_id], ) ddp_withoutload = DistributedDataParallel( model_withoutload, device_ids=[device_id], ) # ensure that all the three models start with the same set of parameters. By default they are randomized on construction for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() for p in model_withload.parameters(): with torch.no_grad(): p.zero_() for p in ddp_withoutload.parameters(): with torch.no_grad(): p.zero_() batch_size = 4 criterion = nn.CrossEntropyLoss() optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001) optimizer_non_ddp_withload = torch.optim.SGD( model_withload.parameters(), lr=0.001 ) optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001) input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # run the model for 6 iterations, with a checkpoint in the middle train_loop(ddp_withload, optimizer_withload, 3) # zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict checkpoint_path = tempfile.gettempdir() + "/model.checkpoint" if self.rank == 0: torch.save(ddp_withload.state_dict(), checkpoint_path) dist.barrier() map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} ddp_state_dict = torch.load(checkpoint_path, map_location=map_location) for model in [ddp_withload, model_withload]: for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() ddp_withload.load_state_dict(ddp_state_dict) # the non-DDP model needs to first remove the prefix of "module." from the DDP state dict torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( ddp_state_dict, "module." ) model_withload.load_state_dict(ddp_state_dict) train_loop(ddp_withload, optimizer_withload, 3) train_loop(model_withload, optimizer_non_ddp_withload, 3) # re-run the model with the same inputs for 6 iterations with no checkpoint train_loop(ddp_withoutload, optimizer_withoutload, 6) for p_withload, p_withoutload, p_non_ddp_withload in zip( ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters(), ): self.assertEqual(p_withload, p_withoutload) self.assertEqual(p_non_ddp_withload, p_withoutload)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
forward
def forward(self, x, rank): return self.t0(x) if rank == 0 else self.t1(x)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
train_loop
def train_loop(model, optimizer, iterations): for _ in range(iterations): optimizer.zero_grad() output = model(input) loss = criterion(output, target) loss.backward() optimizer.step() device_id = gpus_for_rank(self.world_size)[self.rank][0] model_withload = TestModel().float().to(device_id) model_withoutload = TestModel().float().to(device_id) ddp_withload = DistributedDataParallel( model_withload, device_ids=[device_id], ) ddp_withoutload = DistributedDataParallel( model_withoutload, device_ids=[device_id], ) # ensure that all the three models start with the same set of parameters. By default they are randomized on construction for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() for p in model_withload.parameters(): with torch.no_grad(): p.zero_() for p in ddp_withoutload.parameters(): with torch.no_grad(): p.zero_() batch_size = 4 criterion = nn.CrossEntropyLoss() optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001) optimizer_non_ddp_withload = torch.optim.SGD( model_withload.parameters(), lr=0.001 ) optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001) input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id) target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to( device_id ) # run the model for 6 iterations, with a checkpoint in the middle train_loop(ddp_withload, optimizer_withload, 3) # zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict checkpoint_path = tempfile.gettempdir() + "/model.checkpoint" if self.rank == 0: torch.save(ddp_withload.state_dict(), checkpoint_path) dist.barrier() map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} ddp_state_dict = torch.load(checkpoint_path, map_location=map_location) for model in [ddp_withload, model_withload]: for p in ddp_withload.parameters(): with torch.no_grad(): p.zero_() ddp_withload.load_state_dict(ddp_state_dict) # the non-DDP model needs to first remove the prefix of "module." from the DDP state dict torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( ddp_state_dict, "module." ) model_withload.load_state_dict(ddp_state_dict) train_loop(ddp_withload, optimizer_withload, 3) train_loop(model_withload, optimizer_non_ddp_withload, 3) # re-run the model with the same inputs for 6 iterations with no checkpoint train_loop(ddp_withoutload, optimizer_withoutload, 6) for p_withload, p_withoutload, p_non_ddp_withload in zip( ddp_withload.parameters(), ddp_withoutload.parameters(), model_withload.parameters(), ): self.assertEqual(p_withload, p_withoutload) self.assertEqual(p_non_ddp_withload, p_withoutload)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_ddp_comm_hook_future_passing_cpu
def test_ddp_comm_hook_future_passing_cpu(self): """ This unit test verifies whether the Future object is passed properly. The callback function creates a Future object and sets a value to it. """ process_group = self._get_process_group() # Test on CPU cpu_model = DistributedDataParallel( ModuleForDdpCommHook().cpu(), process_group=process_group ) # Register DDP Communication Hook cpu_model.register_comm_hook(None, self._simple_hook) # check whether the grads are equal to what then callback returns. # without the comm_hook, result would be 0.25 * torch.ones(2, 2). self._run_and_verify_hook(cpu_model, 8, 2 * torch.ones(2, 2))
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
comm_hook
def comm_hook( state: object, bucket: int ) -> torch.futures.Future[torch.Tensor]: return torch.futures.Future() model.register_comm_hook(state=None, hook=comm_hook)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_ddp_comm_hook_sparse_gradients
def test_ddp_comm_hook_sparse_gradients(self): """ Runs "test_sparse_gradients" unit test with DDP communication hook. We define a simple hook that does allreduce and works with ucc backend for this test. """ process_group = self._get_process_group() # Ensure initialized weights and inputs are identical across processes torch.manual_seed(1337) vanilla_model = SparseGradientModule() ddp_model = DistributedDataParallel( copy.deepcopy(vanilla_model), process_group=process_group, ) def allreduce_hook_ucc( state: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: def div_by_world_size(fut): # Divide the result by 2 * world_size. return fut.wait()[0] / self.world_size # Prepare allreduced grad bucket tensors by running an async work. fut = process_group.allreduce([bucket.buffer()]).get_future() return fut.then(div_by_world_size) ddp_model.register_comm_hook(None, allreduce_hook_ucc) self._run_and_verify_sparse_gradients(vanilla_model, ddp_model)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class DistributedDataParallelTest( test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase ): def setUp(self): super().setUp() self._spawn_processes() def _get_process_group(self): store = self._get_store() c10d.init_process_group( "ucc", store=store, rank=self.rank, world_size=self.world_size ) return c10d.distributed_c10d._get_default_group() def _test_ucc_backend( self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False ): process_group = self._get_process_group() self._test_ddp_with_process_group( process_group, devices, device_ids, multi_device, gradient_as_bucket_view ) @requires_ucc() def test_ucc_backend_cpu_module(self): self._test_ucc_backend([torch.device("cpu")], None) @requires_ucc() def test_ucc_backend_cpu_module_grad_is_view(self): self._test_ucc_backend( [torch.device("cpu")], None, gradient_as_bucket_view=True ) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_integer_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, int_devices) @requires_ucc() @skip_if_lt_x_gpu(2) def test_ucc_backend_1gpu_module_device_ids_torch_device_list(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, devices) # TODO: test_ucc_backend_2gpu_module and test_ucc_backend_4gpu_module # require broadcast_coalesced which is not supported by ucc currently @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(4) def test_ucc_backend_2gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:2] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) @skip_but_pass_in_sandcastle( "requires broadcast coalesced, which is not supported by ucc currently" ) @requires_ucc() @skip_if_lt_x_gpu(8) def test_ucc_backend_4gpu_module(self): int_devices = gpus_for_rank(self.world_size)[self.rank][:4] devices = [torch.device("cuda:" + str(i)) for i in int_devices] self._test_ucc_backend(devices, None, multi_device=True) def _test_global_local_unused_params_grad( self, gradient_as_bucket_view=False, static_graph=False ): """ By simulating a multi-task training, this test is to make sure: 1) DDP does not touch the grad of globally unused parameters. 2) DDP does update the grad of locally unused parameters. """ class GlobalLocalUnusedParamModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
div_by_world_size
def div_by_world_size(fut): # Divide the result by 2 * world_size. return fut.wait()[0] / self.world_size # Prepare allreduced grad bucket tensors by running an async work. fut = process_group.allreduce([bucket.buffer()]).get_future() return fut.then(div_by_world_size)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
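The comm-hook rows above register a DDP communication hook that all-reduces each gradient bucket asynchronously and then divides the result by the world size. A minimal self-contained sketch of that pattern, assuming a single-process gloo group, a toy linear model, and an illustrative hook name and port (none of these come from the test file):

    import os
    import torch
    import torch.distributed as dist
    from torch import nn
    from torch.nn.parallel import DistributedDataParallel as DDP

    def allreduce_avg_hook(state, bucket):
        # All-reduce the flat gradient bucket asynchronously, then average it.
        group = state if state is not None else dist.group.WORLD
        fut = dist.all_reduce(bucket.buffer(), group=group, async_op=True).get_future()
        return fut.then(lambda f: f.value()[0] / dist.get_world_size(group))

    if __name__ == "__main__":
        # Single-process process group so the sketch runs on one CPU machine.
        os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
        os.environ.setdefault("MASTER_PORT", "29500")
        dist.init_process_group("gloo", rank=0, world_size=1)
        model = DDP(nn.Linear(4, 2))
        model.register_comm_hook(None, allreduce_avg_hook)
        model(torch.randn(8, 4)).sum().backward()
        dist.destroy_process_group()

With more than one rank the same hook averages gradients across processes, which is the behaviour the allreduce_hook_ucc row above checks against a non-DDP baseline.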
torch
test/distributed/test_c10d_ucc.py
device
def device(self): return "cpu"
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
setUp
def setUp(self): super().setUp() self._spawn_processes()
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class ProcessGroupUCCTest(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_compute_comm_reordering.py
func
def func(a): ar = _functional_collectives.all_reduce(a, "sum", "0") b = torch.matmul(a, a) return torch.matmul(ar, b) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs) # Verify that the wait_tensor is sinked below the 1st matmul but # above the 2nd matmul. ( FileCheck() .check("torch.ops._c10d_functional.all_reduce_.default") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs) correct = func(inputs) self.assertTrue(same(out, correct))
import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.utils import same from torch._inductor import ir, scheduler from torch._inductor.comm_analysis import ( baseLat, hwLat, llMaxBws, NCCL_ALGO, NCCL_HW, NCCL_PROTO, NVIDIA_GPU_TYPE, ) from torch._inductor.utils import run_and_get_triton_code from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, at_least_x_gpu, DynamoDistributedMultiProcTestCase, requires_nccl, ) from torch.utils._triton import has_triton from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
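The FileCheck chain in the compute/comm reordering row above pins down an ordering property of the generated code: the collective's wait_tensor must appear after the first matmul (independent compute overlapped with the all-reduce) but before the second matmul that consumes the collective result. A small sketch of how such a FileCheck assertion works, run on a hand-written stand-in string rather than real Inductor output (the string below is illustrative):

    from torch._C import FileCheck

    # Stand-in for code that the test obtains via run_and_get_triton_code.
    fake_code = """
    torch.ops._c10d_functional.all_reduce_.default(buf0)
    extern_kernels.mm(arg0, arg0, out=buf1)
    torch.ops._c10d_functional.wait_tensor.default(buf0)
    extern_kernels.mm(buf0, buf1, out=buf2)
    """

    (
        FileCheck()
        .check("all_reduce_.default")
        .check("extern_kernels.mm")
        .check("wait_tensor.default")
        .check("extern_kernels.mm")
        .run(fake_code)
    )

Each .check matches its pattern at or after the previous match, so the chain fails if wait_tensor were hoisted above the first mm.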
torch
test/distributed/test_compute_comm_reordering.py
test_nccl_heuristics
def test_nccl_heuristics(self): assert len(baseLat) == len(NCCL_ALGO) assert all(len(x) == len(NCCL_PROTO) for x in baseLat) assert len(hwLat) == len(NCCL_HW) assert all(len(x) == len(NCCL_ALGO) for x in hwLat) assert all(len(y) == len(NCCL_PROTO) for x in hwLat for y in x) assert len(llMaxBws) == len(NVIDIA_GPU_TYPE)
import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.utils import same from torch._inductor import ir, scheduler from torch._inductor.comm_analysis import ( baseLat, hwLat, llMaxBws, NCCL_ALGO, NCCL_HW, NCCL_PROTO, NVIDIA_GPU_TYPE, ) from torch._inductor.utils import run_and_get_triton_code from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, at_least_x_gpu, DynamoDistributedMultiProcTestCase, requires_nccl, ) from torch.utils._triton import has_triton @requires_nccl() class TestComputeCommReorderingMultiProc(DynamoDistributedMultiProcTestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t): super().__init__() self.register_buffer('t_rg', t) self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t): super().__init__() self.t_rg = nn.Buffer(t) self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
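The `__init__` pair above swaps `register_buffer` calls for direct `nn.Buffer` assignment. A minimal sketch of why the two are interchangeable, assuming a PyTorch build new enough to ship `torch.nn.Buffer`; the `BufferPair` module name is made up for illustration:

import torch
from torch import nn


class BufferPair(nn.Module):
    def __init__(self, t):
        super().__init__()
        # Old style: explicitly register the tensor as non-parameter module state.
        self.register_buffer("t_old", t.clone())
        # New style: assigning an nn.Buffer registers it through __setattr__.
        self.t_new = nn.Buffer(t.clone())


m = BufferPair(torch.zeros(2, 3))
# Both tensors show up as buffers and travel with state_dict()/.to()/.cuda().
print(sorted(name for name, _ in m.named_buffers()))  # ['t_new', 't_old']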
torch
test/distributed/test_c10d_ucc.py
test_tensor_dtype_complex
def test_tensor_dtype_complex(self):
    self._test_tensor_dtype_complex(backend="ucc")
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, ) class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_c10d_ucc.py
test_allgather_base
def test_allgather_base(self):
    store = dist.FileStore(self.file_name, self.world_size)
    dist.init_process_group(
        "ucc",
        world_size=self.world_size,
        rank=self.rank,
        store=store,
    )
    device = "cuda"
    tensor = torch.ones(10, 10, device=torch.device(device))
    output_tensor = torch.zeros(10, 10, device=torch.device(device))
    dist.all_gather_into_tensor(output_tensor, tensor)
    self.assertEqual(output_tensor, tensor)
import copy import logging import math import operator import os import random import sys import tempfile from functools import reduce import torch import torch.distributed as c10d import test_c10d_common from test_c10d_common import ( gpus_for_rank, ModuleForDdpCommHook, SparseGradientModule, Task, ) import torch.distributed as dist import torch.nn.functional as F import torch.testing._internal.common_utils as common from torch import nn from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( MultiProcessTestCase, requires_ucc, skip_if_lt_x_gpu, verify_ddp_error_logged, ) from torch.testing._internal.common_utils import ( retry_on_connect_failures, run_tests, skip_but_pass_in_sandcastle, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_collective_utils.py
setUp
def setUp(self):
    super().setUp()
    self._spawn_processes()
from unittest import mock import torch.distributed as c10d from torch.distributed.collective_utils import all_gather, broadcast from torch.testing._internal.common_distributed import MultiProcessTestCase class TestCollectiveUtils(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_collective_utils.py
opts
def opts(self, threads=2):
    opts = c10d.ProcessGroupGloo._Options()
    opts._timeout = 50.0
    opts._threads = threads
    return opts
from unittest import mock import torch.distributed as c10d from torch.distributed.collective_utils import all_gather, broadcast from torch.testing._internal.common_distributed import MultiProcessTestCase class TestCollectiveUtils(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_compute_comm_reordering.py
get_snode_runtime_for_reorder_compute_test
def get_snode_runtime_for_reorder_compute_test(snode):
    # NOTE: custom cost model to show that the compute reordering algorithm is working
    # Collective kernels
    if isinstance(snode.node, ir._CollectiveKernel):
        return 100
    elif isinstance(snode.node, ir._WaitKernel):
        return 0
    # High-arithmetic-intensity compute kernels
    elif isinstance(snode.node, ir.ExternKernel):
        return 5
    # All other kernels
    return 1
import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.utils import same from torch._inductor import ir, scheduler from torch._inductor.comm_analysis import ( baseLat, hwLat, llMaxBws, NCCL_ALGO, NCCL_HW, NCCL_PROTO, NVIDIA_GPU_TYPE, ) from torch._inductor.utils import run_and_get_triton_code from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, at_least_x_gpu, DynamoDistributedMultiProcTestCase, requires_nccl, ) from torch.utils._triton import has_triton from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
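The cost model above only takes effect once Inductor is told to consult it. A hedged sketch of that wiring, mirroring the `patch.object` decorators used by the tests in these records; `constant_runtime` is an illustrative stand-in, not part of the test file:

from contextlib import ExitStack
from unittest.mock import patch

from torch._inductor import config as inductor_config


def constant_runtime(snode):
    # Illustrative stand-in for get_snode_runtime_for_reorder_compute_test:
    # charge every scheduler node one unit of time.
    return 1


with ExitStack() as stack:
    # Enable the overlap-reordering pass and point the runtime estimate at the
    # custom cost model, just as the @patch.object decorators in these tests do.
    stack.enter_context(
        patch.object(inductor_config, "reorder_for_compute_comm_overlap", True)
    )
    stack.enter_context(
        patch.object(
            inductor_config,
            "reorder_for_compute_comm_overlap_passes",
            ["reorder_compute_for_overlap"],
        )
    )
    stack.enter_context(
        patch.object(inductor_config, "estimate_op_runtime", constant_runtime)
    )
    # torch.compile(...) calls issued inside this block would be scheduled
    # using constant_runtime instead of Inductor's built-in estimate.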
torch
test/distributed/test_compute_comm_reordering.py
create_grouped_node_for_allreduce_and_its_deps
def create_grouped_node_for_allreduce_and_its_deps(snodes): name_to_snode = {snode.node.name: snode for snode in snodes} all_reduce_snodes = [ snode for snode in snodes if isinstance(snode.node, ir._CollectiveKernel) and snode.node.op_overload == torch.ops._c10d_functional.all_reduce_.default ] assert len(all_reduce_snodes) == 1 all_reduce_snode = all_reduce_snodes[0] all_reduce_dep_snodes = [ name_to_snode[node.name] for node in all_reduce_snode.node.inputs ] assert len(all_reduce_dep_snodes) == 1 all_reduce_dep_snode = all_reduce_dep_snodes[0] grouped_snode = scheduler.GroupedSchedulerNode.create( [all_reduce_dep_snode, all_reduce_snode] ) new_snode_order = [] new_snode_order.append(grouped_snode) for snode in snodes: if snode in grouped_snode.snodes: continue new_snode_order.append(snode) return new_snode_order @requires_nccl() class TestComputeCommReorderingMultiProc(DynamoDistributedMultiProcTestCase): """ Run correctness checks in multi-proc runner, mark with minimum # GPUs to run under """ def get_world_trs(self): return { "tag": "", "ranks": list(range(self.world_size)), "group_size": self.world_size, } @property def world_size(self) -> int: # hack: no matter whether we have 2 or 3 or 4 gpus, just run on 2 # works around issue with skipif<2 and workers with unpredictable #s gpu return 2 @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "allow_buffer_reuse", True) # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object(torch._inductor.config, "reorder_for_locality", False) @patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True) @patch.object( torch._inductor.config, "reorder_for_compute_comm_overlap_passes", [ "sink_waits", ], ) def test_sink_waits(self): def func(a): ar = _functional_collectives.all_reduce(a, "sum", "0") b = torch.matmul(a, a) return torch.matmul(ar, b) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs) # Verify that the wait_tensor is sinked below the 1st matmul but # above the 2nd matmul. 
( FileCheck() .check("torch.ops._c10d_functional.all_reduce_.default") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs) correct = func(inputs) self.assertTrue(same(out, correct)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "allow_buffer_reuse", True) # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object(torch._inductor.config, "reorder_for_locality", False) @patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True) @patch.object( torch._inductor.config, "reorder_for_compute_comm_overlap_passes", [ "raise_comms", ], ) def test_raise_comms(self): def func(a): b = torch.matmul(a, a) c = torch.relu(b) d = torch.matmul(c, c) e = _functional_collectives.all_reduce(b, "sum", "0") return torch.matmul(d, e) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs) print(code) # Verify that the all_reduce_ has been raised above the 2nd matmul # but below the 1st matmul. Note that the all_reduce_ directly # writes to the output buffer of the 1st matmul, which is an input # to the first relu. Therefore, the all_reduce_ should be scheduled # after the first relu. ( FileCheck() .check("extern_kernels.mm") .check("triton_poi_fused_relu") .check("torch.ops._c10d_functional.all_reduce_.default") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs) correct = func(inputs) self.assertTrue(same(out, correct)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "allow_buffer_reuse", True) # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True) @patch.object( torch._inductor.config, "reorder_for_compute_comm_overlap_passes", [ "sink_waits", "raise_comms", ], ) def test_sink_waits_raise_comms(self): def func(a, *, tag, ranks, group_size): b = torch.matmul(a, a) c = torch.relu(b) d = torch.matmul(c, c) e = _functional_collectives.all_reduce(b, "sum", "0") f = torch.relu(d) g = torch.matmul(f, f) return torch.mm(e, g) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs()) # Things to verify: # - The clone prologue of the all_reduce_ should not be fused with # any relus. # - The all_reduce_ and its prologue should be raised above the 2nd # matmul but below the 1st matmul. # - The wait_tensor should be sinked below the 3rd matmul but above # the 4th matmul. 
( FileCheck() .check("extern_kernels.mm") .check("triton_poi_fused_all_reduce_0") .check("torch.ops._c10d_functional.all_reduce_.default") .check("triton_poi_fused_relu") .check("extern_kernels.mm") .check("triton_poi_fused_relu") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs, **self.get_world_trs()) correct = func(inputs, **self.get_world_trs()) self.assertTrue(same(out, correct)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "allow_buffer_reuse", True) # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True) @patch.object( torch._inductor.config, "reorder_for_compute_comm_overlap_passes", [ "reorder_compute_for_overlap", ], ) def test_reorder_compute_for_overlap(self): def func(a, *, tag, ranks, group_size): ar = _functional_collectives.all_reduce(a, "sum", ranks, tag) g = torch.matmul(a, a) c = torch.relu(a) d = torch.matmul(c, c) f = d * c * ar fr = _functional_collectives.all_reduce(f, "sum", ranks, tag) e = torch.matmul(d + ar + fr, g) return (e,) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs()) # NOTE: after scheduling the first all_reduce: # 1. we first schedule the ops (c and d) that ARE required for second all_reduce but DO NOT depend on first all_reduce. # 2. then, we schedule the ops (g) that ARE NOT required for second all_reduce and DO NOT depend on first all_reduce. # 3. then, we schedule the ops (f) that ARE required for second all_reduce and DO depend on first all_reduce. # and then, we schedule the second all_reduce. And then schedule all ops that depend on second all_reduce. 
( FileCheck() .check("torch.ops._c10d_functional.all_reduce_.default") .check("triton_poi_fused_relu") .check("extern_kernels.mm") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("triton_poi_fused_mul") .check("torch.ops._c10d_functional.all_reduce_.default") .check("torch.ops._c10d_functional.wait_tensor.default") .check("triton_poi_fused_add") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs, **self.get_world_trs()) correct = func(inputs, **self.get_world_trs()) self.assertTrue(same(out, correct)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "allow_buffer_reuse", True) # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True) @patch.object( torch._inductor.config, "reorder_for_compute_comm_overlap_passes", [ "reorder_compute_for_overlap", ], ) @patch.object( torch._inductor.config, "estimate_op_runtime", get_snode_runtime_for_reorder_compute_test, ) def test_reorder_compute_for_overlap_custom_runtime_estimation(self): def func(a, *, tag, ranks, group_size): ar = _functional_collectives.all_reduce(a, "sum", ranks, tag) g = torch.matmul(a, a) c = torch.relu(a) d = torch.matmul(c, c) f = d * c * ar fr = _functional_collectives.all_reduce(f, "sum", ranks, tag) e = torch.matmul(d + ar + fr, g) return (e,) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs()) # NOTE: after scheduling the first all_reduce: # 1. we first schedule the ops (c and d) that ARE required for second all_reduce but DO NOT depend on first all_reduce. # 2. then, we schedule the ops (g) that ARE NOT required for second all_reduce and DO NOT depend on first all_reduce. # 3. then, we schedule the ops (f) that ARE required for second all_reduce and DO depend on first all_reduce. # and then, we schedule the second all_reduce. And then schedule all ops that depend on second all_reduce. 
( FileCheck() .check("torch.ops._c10d_functional.all_reduce_.default") .check("triton_poi_fused_relu") .check("extern_kernels.mm") .check("extern_kernels.mm") .check("torch.ops._c10d_functional.wait_tensor.default") .check("triton_poi_fused_mul") .check("torch.ops._c10d_functional.all_reduce_.default") .check("torch.ops._c10d_functional.wait_tensor.default") .check("triton_poi_fused_add") .check("extern_kernels.mm") .run(code) ) out = compiled(inputs, **self.get_world_trs()) correct = func(inputs, **self.get_world_trs()) self.assertTrue(same(out, correct)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor @patch.object(torch._inductor.config, "compile_threads", 1) @patch.object( torch._inductor.config, "_pre_fusion_custom_pass", create_grouped_node_for_allreduce_and_its_deps, ) def test_grouped_scheduler_node(self): def func(a, *, tag, ranks, group_size): add = a + a div = add / a ar = _functional_collectives.all_reduce(div, "sum", ranks, tag) # Normally, we would fuse `add = a + a`, `div = add / a` and `mul = a * a` together into a single fused op, # but here in this unit test, we intentionally put `add`, `div` and `ar` computation # into a GroupedSchedulerNode, which prevents them from being fused with any other ops. mul = a * a mm = torch.matmul(mul, ar) return (mm,) with _dynamo_dist_per_rank_init( self.rank, self.world_size, fake_pg=not at_least_x_gpu(2) ): inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank compiled = torch.compile(func) code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs()) # Expectations: # 1. `add = a + a` and `div = add / a` are still fused, which means fusion # still happens among nodes within a GroupedSchedulerNode. # 2. `mul = a * a` is not fused with `add` or `div`, because the latter two are within # GroupedSchedulerNode and thus are prevented from being fused with any outside ops. FileCheck().check("triton_poi_fused_add_div_0.").check( "_c10d_functional.all_reduce_." ).check("triton_poi_fused_mul_1.").run(code) out = compiled(inputs, **self.get_world_trs()) correct = func(inputs, **self.get_world_trs()) self.assertTrue(same(out, correct)) def test_nccl_heuristics(self): assert len(baseLat) == len(NCCL_ALGO) assert all(len(x) == len(NCCL_PROTO) for x in baseLat) assert len(hwLat) == len(NCCL_HW) assert all(len(x) == len(NCCL_ALGO) for x in hwLat) assert all(len(y) == len(NCCL_PROTO) for x in hwLat for y in x) assert len(llMaxBws) == len(NVIDIA_GPU_TYPE) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.utils import same from torch._inductor import ir, scheduler from torch._inductor.comm_analysis import ( baseLat, hwLat, llMaxBws, NCCL_ALGO, NCCL_HW, NCCL_PROTO, NVIDIA_GPU_TYPE, ) from torch._inductor.utils import run_and_get_triton_code from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, at_least_x_gpu, DynamoDistributedMultiProcTestCase, requires_nccl, ) from torch.utils._triton import has_triton from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_compute_comm_reordering.py
func
def func(a):
    ar = _functional_collectives.all_reduce(a, "sum", "0")
    b = torch.matmul(a, a)
    return torch.matmul(ar, b)

with _dynamo_dist_per_rank_init(
    self.rank, self.world_size, fake_pg=not at_least_x_gpu(2)
):
    inputs = torch.ones(4, 4, dtype=torch.float, device="cuda") + self.rank
    compiled = torch.compile(func)
    code = run_and_get_triton_code(compiled, inputs)
    # Verify that the wait_tensor is sinked below the 1st matmul but
    # above the 2nd matmul.
    (
        FileCheck()
        .check("torch.ops._c10d_functional.all_reduce_.default")
        .check("extern_kernels.mm")
        .check("torch.ops._c10d_functional.wait_tensor.default")
        .check("extern_kernels.mm")
        .run(code)
    )
    out = compiled(inputs)
    correct = func(inputs)
    self.assertTrue(same(out, correct))
import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.utils import same from torch._inductor import ir, scheduler from torch._inductor.comm_analysis import ( baseLat, hwLat, llMaxBws, NCCL_ALGO, NCCL_HW, NCCL_PROTO, NVIDIA_GPU_TYPE, ) from torch._inductor.utils import run_and_get_triton_code from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, at_least_x_gpu, DynamoDistributedMultiProcTestCase, requires_nccl, ) from torch.utils._triton import has_triton from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_data_parallel.py
test
def test(inner_m, dp_device, inp, device_ids, should_fail): if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if isinstance(device_ids[0], torch.device): expect_device = device_ids[0] else: expect_device = torch.device("cuda:{}".format(device_ids[0])) if should_fail: def assert_correct(): return self.assertRaisesRegex(RuntimeError, error_msg.format(expect_device)) else: assert_correct = dummy_ctx_manager # test DataParallel module dpm = nn.DataParallel(inner_m, device_ids) if dp_device is not None: dpm = dpm.to(dp_device) with assert_correct(): dpm(inp) # test functional with assert_correct(): nn.parallel.data_parallel(inner_m.to(dp_device), inp, device_ids) test(l.to('cpu'), None, inp, None, should_fail=True) test(l.cuda(1), None, inp_cuda0, None, should_fail=True) test(l.cuda(), None, inp_cuda0, [1, 0], should_fail=True) test(l.cuda(), None, inp_cuda0, None, should_fail=False) test(l.cpu(), 'cuda', inp_cuda0, None, should_fail=False) test(l.cuda(1), None, inp_cuda1, [1, 0], should_fail=False) test(l.cpu(), 'cuda:1', inp_cuda1, [1, 0], should_fail=False) s = nn.Sequential(l.cpu()) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(deepcopy(l).cpu(), l.cuda()) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(l.cuda(), deepcopy(l).cuda(1)) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(l.cuda(), deepcopy(l).cuda()) test(s, None, inp, None, should_fail=False) test(s, None, inp, [0, 1], should_fail=False) test(s, None, inp, [1, 0], should_fail=True) test(s.cpu(), None, inp, [1, 0], should_fail=True) test(s.cuda(1), None, inp, [1, 0], should_fail=False)
def test(inner_m, dp_device, inp, device_ids, should_fail): if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if isinstance(device_ids[0], torch.device): expect_device = device_ids[0] else: expect_device = torch.device(f"cuda:{device_ids[0]}") if should_fail: def assert_correct(): return self.assertRaisesRegex( RuntimeError, error_msg.format(expect_device) ) else: assert_correct = dummy_ctx_manager # test DataParallel module dpm = nn.DataParallel(inner_m, device_ids) if dp_device is not None: dpm = dpm.to(dp_device) with assert_correct(): dpm(inp) # test functional with assert_correct(): nn.parallel.data_parallel(inner_m.to(dp_device), inp, device_ids) test(l.to("cpu"), None, inp, None, should_fail=True) test(l.cuda(1), None, inp_cuda0, None, should_fail=True) test(l.cuda(), None, inp_cuda0, [1, 0], should_fail=True) test(l.cuda(), None, inp_cuda0, None, should_fail=False) test(l.cpu(), "cuda", inp_cuda0, None, should_fail=False) test(l.cuda(1), None, inp_cuda1, [1, 0], should_fail=False) test(l.cpu(), "cuda:1", inp_cuda1, [1, 0], should_fail=False) s = nn.Sequential(l.cpu()) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(deepcopy(l).cpu(), l.cuda()) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(l.cuda(), deepcopy(l).cuda(1)) test(s, None, inp, None, should_fail=True) test(s, None, inp, [0, 1], should_fail=True) test(s, None, inp, [1, 0], should_fail=True) s = nn.Sequential(l.cuda(), deepcopy(l).cuda()) test(s, None, inp, None, should_fail=False) test(s, None, inp, [0, 1], should_fail=False) test(s, None, inp, [1, 0], should_fail=True) test(s.cpu(), None, inp, [1, 0], should_fail=True) test(s.cuda(1), None, inp, [1, 0], should_fail=False)
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t):
    super().__init__()
    self.register_buffer('t_rg', t)
    self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t):
    super().__init__()
    self.t_rg = nn.Buffer(t)
    self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
test_data_parallel_complex
def test_data_parallel_complex(self):
    # We expect complex parameters to be broadcast by view_as_real, e.g. move from C to R^2
    class Cplx(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.cplx = torch.nn.Parameter(torch.zeros(1, 10, dtype=torch.cfloat).cuda())

        def forward(self, x):
            return x + self.cplx

    cplx = torch.nn.DataParallel(Cplx().cuda())
    input = torch.rand(1, 10, dtype=torch.cfloat).cuda()
    result = cplx(input)
    # 2 is the extra real view dimension here
    self.assertEqual(result.size(), torch.Size([1, 10, 2]))
    self.assertEqual(result, torch.view_as_real(input))
def test_data_parallel_complex(self):
    # We expect complex parameters to be broadcast by view_as_real, e.g. move from C to R^2
    class Cplx(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.cplx = torch.nn.Parameter(
                torch.zeros(1, 10, dtype=torch.cfloat).cuda()
            )

        def forward(self, x):
            return x + self.cplx

    cplx = torch.nn.DataParallel(Cplx().cuda())
    input = torch.rand(1, 10, dtype=torch.cfloat).cuda()
    result = cplx(input)
    # 2 is the extra real view dimension here
    self.assertEqual(result.size(), torch.Size([1, 10, 2]))
    self.assertEqual(result, torch.view_as_real(input))
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestDataParallel(TestCase): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestDataParallel(TestCase): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t):
    super().__init__()
    self.register_buffer('t_rg', t)
    self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t):
    super().__init__()
    self.t_rg = nn.Buffer(t)
    self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t):
    super().__init__()
    self.register_buffer('t_rg', t)
    self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t):
    super().__init__()
    self.t_rg = nn.Buffer(t)
    self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
test_data_parallel_module_kwargs_only_empty_tuple
def test_data_parallel_module_kwargs_only_empty_tuple(self, device, dtype):
    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.l = l

        def forward(self, input):
            return self.l(input['data'])

    l = nn.Linear(10, 5).to(device, dtype)
    i = torch.randn(20, 10, device=device, dtype=dtype)
    expected_out = l(i)
    n = nn.DataParallel(Net())
    out = n(input={'data': i, 'unused': ()})
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
def test_data_parallel_module_kwargs_only_empty_tuple(self, device, dtype):
    class Net(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.l = l

        def forward(self, input):
            return self.l(input["data"])

    l = nn.Linear(10, 5).to(device, dtype)
    i = torch.randn(20, 10, device=device, dtype=dtype)
    expected_out = l(i)
    n = nn.DataParallel(Net())
    out = n(input={"data": i, "unused": ()})
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out, expected_out, atol=dtype2prec_DONTUSE[dtype], rtol=0)
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) import gc class TestDataParallelDeviceType(TestCase):
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) import gc class TestDataParallelDeviceType(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t):
    super().__init__()
    self.register_buffer('t_rg', t)
    self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t):
    super().__init__()
    self.t_rg = nn.Buffer(t)
    self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_device_mesh.py
_get_device_type
def _get_device_type(world_size):
    if (
        torch.cuda.is_available()
        and torch.cuda.device_count() >= world_size
        and is_nccl_available()
    ):
        device_type = "cuda"
    else:
        device_type = "cpu"
    return device_type
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
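A short sketch of how a helper like `_get_device_type` pairs with env-var setup to build a one-rank `DeviceMesh` outside the multiprocess harness; the port, world size, and rank below are illustrative assumptions:

import os

import torch
import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh

world_size, rank = 1, 0
# Same decision rule as _get_device_type: use CUDA only if there are enough
# GPUs and the NCCL backend is available, otherwise fall back to CPU/gloo.
use_cuda = (
    torch.cuda.is_available()
    and torch.cuda.device_count() >= world_size
    and dist.is_nccl_available()
)
device_type = "cuda" if use_cuda else "cpu"

# Environment variables that allow the default process group to bootstrap.
os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "25364")
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(rank)

mesh = init_device_mesh(device_type, (world_size,))  # initializes the default pg lazily
print(mesh.get_group().size())  # 1
dist.destroy_process_group()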
torch
test/distributed/test_device_mesh.py
_set_env_var
def _set_env_var(addr="localhost", port="25364", world_size=1, rank=0): os.environ["MASTER_ADDR"] = addr os.environ["MASTER_PORT"] = port os.environ["WORLD_SIZE"] = f"{world_size}" os.environ["RANK"] = f"{rank}" class DeviceMeshTestGlooBackend(DTensorTestBase): @property def backend(self): return "gloo" @with_comms def test_device_mesh_reuse_default_group(self): mesh = init_device_mesh(self.device_type, (self.world_size,)) mesh_group = mesh.get_group() default_group = _get_default_group() if torch.cuda.is_available(): self.assertNotEqual(mesh_group, default_group) self.assertEqual(get_world_size(mesh_group), get_world_size(default_group)) else: self.assertEqual(mesh_group, default_group) class DeviceMeshTest(DTensorTestBase): @property def world_size(self): return 4 def test_init_process_group(self): device_type = _get_device_type(self.world_size) mesh_tensor = torch.arange(4).reshape(2, 2) self.assertTrue(not is_initialized()) _set_env_var(world_size=self.world_size, rank=self.rank) DeviceMesh(device_type, mesh_tensor) self.assertTrue(is_initialized()) self.destroy_pg() @with_comms @skip_if_lt_x_gpu(4) def test_assert_invalid_mesh_tensor(self): mesh = torch.arange(self.world_size).to(self.rank) with self.assertRaises(ValueError): device_mesh = DeviceMesh(self.device_type, mesh) @with_comms def test_get_group_and_get_all_groups(self): mesh_shape = (2, self.world_size // 2) mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=("dp", "tp") ) tp_mesh = mesh_2d["tp"] dp_mesh = mesh_2d["dp"] self.assertEqual(mesh_2d.get_group(0), mesh_2d.get_group("dp")) self.assertEqual(mesh_2d.get_group(1), mesh_2d.get_group("tp")) self.assertEqual(mesh_2d.get_group("dp"), dp_mesh.get_group()) self.assertEqual(mesh_2d.get_group("tp"), tp_mesh.get_group()) groups = mesh_2d.get_all_groups() self.assertEqual(len(groups), 2) self.assertTrue(tp_mesh.get_group() in groups) self.assertTrue(dp_mesh.get_group() in groups) @with_comms def test_get_local_rank_raises_exception(self): mesh_shape = (2, self.world_size // 2) mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=("dp", "tp") ) with self.assertRaisesRegex( RuntimeError, "Optional kwarg `mesh_dim` needs to be specified when device_mesh.ndim > 1.", ): local_rank = mesh_2d.get_local_rank() @with_comms def test_get_local_rank(self): mesh_shape = (2, self.world_size // 2) mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=("dp", "tp") ) self.assertEqual(mesh_2d.get_local_rank("dp"), mesh_2d.get_local_rank(0)) self.assertEqual(mesh_2d.get_local_rank("tp"), mesh_2d.get_local_rank(1)) dp_mesh = mesh_2d["dp"] tp_mesh = mesh_2d["tp"] self.assertEqual(dp_mesh.get_local_rank(), mesh_2d.get_local_rank("dp")) self.assertEqual(tp_mesh.get_local_rank(), mesh_2d.get_local_rank("tp")) # Verify flattened mesh local rank correctness. 
flattened_mesh = mesh_2d["dp", "tp"]._flatten() self.assertEqual(flattened_mesh.get_local_rank(), self.rank) @with_comms def test_device_mesh_2d(self): mesh_tensor = torch.arange(4).reshape(2, 2) # construct a cuda device mesh mesh = DeviceMesh(self.device_type, mesh_tensor) # check all dim groups dim_to_subgroups = mesh.get_all_groups() expected_ranks_by_dim = [[[0, 2], [1, 3]], [[0, 1], [2, 3]]] for dim, dim_group in enumerate(dim_to_subgroups): self.assertTrue(dim < 2) dim_ranks = expected_ranks_by_dim[dim] dim_group_size = get_world_size(dim_group) self.assertIsInstance(dim_group, ProcessGroup) self.assertEqual(dim_group_size, 2) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] current_rank_expected_group_ranks = ( dim_ranks[0] if self.rank in dim_ranks[0] else dim_ranks[1] ) self.assertEqual(global_ranks, current_rank_expected_group_ranks) @with_comms def test_device_mesh_init_backend(self): mesh = DeviceMesh(self.device_type, [1], _init_backend=False) with self.assertRaisesRegex(RuntimeError, "process groups not initialized!"): mesh.get_group() # coordinates should always been populated when init_backend is False, as whenever # we call init_backend we should make sure the default pg already created mesh.get_coordinate() def test_fake_pg_device_mesh(self): fake_store = FakeStore() init_process_group("fake", store=fake_store, rank=0, world_size=self.world_size) device_type = "cuda" if torch.cuda.is_available() else "cpu" mesh = DeviceMesh(device_type, torch.arange(self.world_size)) local_tensor = torch.randn(2, 8) global_tensor = funcol.all_gather_tensor( local_tensor, gather_dim=0, group=(mesh, 0) ) self.assertEqual(global_tensor.shape, (self.world_size * 2, 8)) @with_comms def test_from_group_with_global_pg(self): # Simple test: check `from_group` from a mesh pg vs. 
directly # initializing via `init_device_mesh` ref_global_mesh = init_device_mesh(self.device_type, (self.world_size,)) mesh_pg = ref_global_mesh.get_group() global_mesh = DeviceMesh.from_group(mesh_pg, self.device_type) self.assertEqual(ref_global_mesh, global_mesh) self.assertEqual(ref_global_mesh._dim_group_infos, global_mesh._dim_group_infos) self.assertEqual( ref_global_mesh._coordinate_on_dim, global_mesh._coordinate_on_dim ) @with_comms def test_from_group_with_invalid_mesh(self): global_pg = _get_default_group() global_pg_size = global_pg.size() assert global_pg_size == 4, "Test assumes global world size of 4" invalid_mesh = [[0, 1], [2, 3]] # 2D mesh when we need 1D regex = r"Invalid mesh \[\[0, 1\], \[2, 3\]\] for ProcessGroup with ranks \[0, 1, 2, 3\]" with self.assertRaisesRegex(ValueError, regex): DeviceMesh.from_group(global_pg, "cuda", invalid_mesh) device_mesh = init_device_mesh(self.device_type, (2, 2)) groups = device_mesh.get_all_groups() invalid_mesh = (0, 1, 2, 3) # 1D mesh when we need 2D regex = r"Expects mesh with ndim equal to number of ProcessGroups but got mesh \[0, 1, 2, 3\] and 2 ProcessGroups" with self.assertRaisesRegex(ValueError, regex): DeviceMesh.from_group(groups, self.device_type, invalid_mesh) def test_raises_invalid_device_type(self): with self.assertRaisesRegex( RuntimeError, "Device type with GPU index is not supported", ): # test init_device_mesh with an invalid device type that contains a GPU index mesh_shape = (2, self.world_size // 2) mesh_2d = init_device_mesh( "cuda:0", mesh_shape=mesh_shape, mesh_dim_names=("dp", "tp") ) @with_comms def test_set_mesh_dim_group_options(self): device_type = "cuda" if torch.cuda.is_available() else "cpu" _mesh_resources._set_mesh_dim_group_options(1, "fake", None) mesh_tensor = torch.arange(4).reshape(2, 2) mesh = DeviceMesh(device_type, mesh_tensor) self.assertEqual(mesh.get_group(1)._get_backend_name(), "fake") class DeviceMeshTestNDim(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_device_mesh_nd(self): # construct a cuda device mesh mesh_tensor = torch.arange(8).reshape(2, 2, 2) mesh = DeviceMesh(self.device_type, mesh_tensor) # check all dim groups dim_to_subgroups = mesh.get_all_groups() for dim, dim_group in enumerate(dim_to_subgroups): self.assertTrue(dim < mesh_tensor.ndim) dim_ranks = mesh_tensor.swapdims(-1, dim).reshape(-1, 2) dim_group_size = get_world_size(dim_group) self.assertIsInstance(dim_group, ProcessGroup) self.assertEqual(dim_group_size, 2) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] for ranks in dim_ranks: if self.rank in ranks: self.assertEqual(global_ranks, ranks.tolist()) @with_comms def test_device_mesh_hash(self): mesh_tensor_2d = torch.arange(8).reshape(4, 2) mesh = DeviceMesh(self.device_type, mesh_tensor_2d) mesh2 = DeviceMesh(self.device_type, mesh_tensor_2d) self.assertEqual(hash(mesh), hash(mesh2)) mesh_tensor_3d = torch.arange(8).reshape(2, 2, 2) mesh3 = DeviceMesh(self.device_type, mesh_tensor_3d) self.assertNotEqual(hash(mesh), hash(mesh3)) self.assertNotEqual(hash(mesh2), hash(mesh3)) @with_comms def test_get_local_rank_3d(self): """ If we have a 3D mesh and we want to apply dp, pp, tp to it, mesh_dim_names = ["dp", "pp", "tp"], and the mesh tensor would be: mesh_3d_tensor = [ [ [0, 1], [2, 3], ], [ [4, 5], [6, 7], ] ] """ mesh_shape = (2, 2, 2) mesh_3d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=("dp", "pp", "tp") ) # tp_rank_0: [0, 2, 4, 6], tp_rank_1: [1, 3, 5, 7] tp_rank = 
mesh_3d.get_local_rank("tp") expected_tp_rank = self.rank % 2 self.assertEqual(tp_rank, expected_tp_rank) # pp_rank_0: [0, 1, 4, 5], pp_rank_1: [2, 3, 6, 7] pp_rank = mesh_3d.get_local_rank("pp") expected_pp_rank = 0 if self.rank % 4 <= 1 else 1 self.assertEqual(pp_rank, expected_pp_rank) # dp_rank_0: [0, 1, 2, 3], dp_rank_1: [4, 5, 6, 7] dp_rank = mesh_3d.get_local_rank("dp") expected_dp_rank = self.rank // 4 self.assertEqual(dp_rank, expected_dp_rank) @with_comms def test_device_mesh_parent_child_hash(self): mesh_2d = init_device_mesh( self.device_type, (2, self.world_size // 2), mesh_dim_names=("DP", "TP") ) mesh_group_1 = torch.arange(0, self.world_size // 2) mesh_group_2 = torch.arange(self.world_size // 2, self.world_size) ep_mesh_1 = DeviceMesh(self.device_type, mesh_group_1) ep_mesh_2 = DeviceMesh(self.device_type, mesh_group_2) ep_mesh = ep_mesh_1 if self.rank < self.world_size // 2 else ep_mesh_2 # ep_mesh is considered different from mesh_2d["TP"] self.assertEqual(mesh_2d["TP"]._flatten_mesh_list, ep_mesh._flatten_mesh_list) self.assertEqual(mesh_2d["TP"].mesh.shape, ep_mesh.mesh.shape) self.assertEqual(mesh_2d["TP"].device_type, ep_mesh.device_type) self.assertNotEqual(mesh_2d["TP"].mesh_dim_names, ep_mesh.mesh_dim_names) self.assertEqual(mesh_2d["TP"]._thread_id, ep_mesh._thread_id) self.assertNotEqual(hash(mesh_2d["TP"]), hash(ep_mesh)) self.assertNotEqual(mesh_2d["TP"], ep_mesh) another_mesh_1 = DeviceMesh(self.device_type, mesh_group_1) another_mesh_2 = DeviceMesh(self.device_type, mesh_group_2) another_mesh = ( another_mesh_1 if self.rank < self.world_size // 2 else another_mesh_2 ) # another_mesh is considered the same as ep_mesh self.assertEqual(ep_mesh._flatten_mesh_list, another_mesh._flatten_mesh_list) self.assertEqual(ep_mesh.mesh.shape, another_mesh.mesh.shape) self.assertEqual(ep_mesh.device_type, another_mesh.device_type) self.assertEqual(ep_mesh.mesh_dim_names, another_mesh.mesh_dim_names) self.assertEqual(ep_mesh._thread_id, another_mesh._thread_id) self.assertEqual(hash(ep_mesh), hash(another_mesh)) self.assertEqual(ep_mesh, another_mesh) @with_comms def test_from_group_with_mesh_shape(self): """Tests ``from_group`` when passing ``mesh_shape`` as 2D.""" # Consider two different logical views of the same mesh: # - (4, 2) ("dp", "tp") mesh # - (2, 2, 2) ("dp_replicate", "dp_shard", "tp") mesh mesh_shape = (2, 2, 2) mesh_dim_names = ("dp_replicate", "dp_shard", "tp") ref_mesh = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) dp_shard_group = ref_mesh["dp_shard"].get_group() dp_replicate_group = ref_mesh["dp_replicate"].get_group() dp_mesh = DeviceMesh.from_group( [dp_replicate_group, dp_shard_group], self.device_type, mesh=ref_mesh.mesh[:, :, ref_mesh.get_local_rank(2)], mesh_dim_names=mesh_dim_names[:2], ) ref_mesh_dp_dim_group_infos = ref_mesh._dim_group_infos[:2] for (_, ref_ranks, _), (_, ranks, _) in zip( ref_mesh_dp_dim_group_infos, dp_mesh._dim_group_infos ): self.assertEqual(ref_ranks, ranks) # Cannot check directly for mesh equality since parent meshes are not # the same since the ref's parent mesh is 3D self.assertEqual(dp_mesh["dp_replicate"].mesh, ref_mesh["dp_replicate"].mesh) for (_, ref_ranks, _), (_, ranks, _) in zip( dp_mesh["dp_replicate"]._dim_group_infos, ref_mesh["dp_replicate"]._dim_group_infos, ): self.assertEqual(ref_ranks, ranks) self.assertEqual(dp_mesh["dp_shard"].mesh, ref_mesh["dp_shard"].mesh) for (_, ref_ranks, _), (_, ranks, _) in zip( dp_mesh["dp_shard"]._dim_group_infos, 
ref_mesh["dp_shard"]._dim_group_infos ): self.assertEqual(ref_ranks, ranks) class InitDeviceMeshTest(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_init_device_mesh(self): mesh_shape = (2, 4) mesh_dim_names = ("DP", "TP") ref_mesh = DeviceMesh( self.device_type, torch.arange(8).view(mesh_shape), mesh_dim_names=mesh_dim_names, ) # test init_device_mesh with mesh_dim_names mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) self.assertEqual(mesh_2d, ref_mesh) self.assertEqual(mesh_2d.mesh_dim_names, mesh_dim_names) @with_comms def test_raises_duplicate_mesh_dim_names(self): with self.assertRaisesRegex( RuntimeError, "Each mesh_dim_name must be unique.", ): mesh = init_device_mesh( self.device_type, (2, 4), mesh_dim_names=["dp", "dp"], ) @with_comms def test_raises_mesh_shape_mesh_dim_names_mismatch(self): with self.assertRaisesRegex( RuntimeError, "mesh_shape and mesh_dim_names should have same length!", ): mesh = init_device_mesh( self.device_type, (8,), mesh_dim_names=["dp", "tp"], ) class TestDeviceMeshGetItem(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_raises_no_mesh_dim_found(self): with self.assertRaisesRegex( RuntimeError, "Cannot slice a DeviceMesh without mesh_dim_names!" ): mesh = init_device_mesh(self.device_type, (2, 4)) child_mesh = mesh["DP"] @with_comms def test_raises_invalid_mesh_dim_name(self): child_mesh_dim_name = ("PP",) with self.assertRaisesRegex(KeyError, "Invalid mesh_dim_name"): mesh_dim_names = ("DP", "TP") mesh = init_device_mesh( self.device_type, (2, 4), mesh_dim_names=mesh_dim_names ) child_mesh = mesh[child_mesh_dim_name] @with_comms def test_get_item_2d(self): mesh_shape = (2, 4) mesh_dim_names = ("DP", "TP") mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) pg_ranks_by_dim_name = {} for mesh_dim_name in mesh_dim_names: mesh_dim = mesh_dim_names.index(mesh_dim_name) pg_ranks_by_dim_name[mesh_dim_name] = mesh_2d.mesh.swapdims( -1, mesh_dim ).reshape(-1, mesh_2d.mesh.size(mesh_dim)) tp_mesh = mesh_2d["TP"] tp_group_idx = self.rank // 4 self.assertEqual(tp_mesh.mesh, pg_ranks_by_dim_name["TP"][tp_group_idx]) dp_mesh = mesh_2d["DP"] dp_group_idx = self.rank % 4 self.assertEqual(mesh_2d["DP"].mesh, pg_ranks_by_dim_name["DP"][dp_group_idx]) @with_comms def test_get_item_1d(self): mesh = init_device_mesh(self.device_type, (8,), mesh_dim_names=("dp",)) # Make sure slicing out 1D mesh from a 1D mesh works. dp_mesh = mesh["dp"] self.assertEqual(dp_mesh, mesh) with self.assertRaisesRegex(KeyError, "Invalid mesh_dim_name"): dp_mesh = mesh["dim0"] @with_comms def test_get_item_3d(self): mesh_shape = (2, 2, 2) mesh_dim_names = ("Replicate", "Shard", "TP") mesh_3d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) tp_group = [[0, 1], [2, 3], [4, 5], [6, 7]] tp_group_idx = int(self.rank / 2) self.assertEqual(mesh_3d["TP"].mesh.tolist(), tp_group[tp_group_idx]) shard_group = [[0, 2], [1, 3], [4, 6], [5, 7]] shard_group_idx = self.rank % 2 + self.rank // 4 * 2 self.assertEqual(mesh_3d["Shard"].mesh.tolist(), shard_group[shard_group_idx]) replicate_group = [[0, 4], [1, 5], [2, 6], [3, 7]] replicate_group_idx = self.rank % 4 self.assertEqual( mesh_3d["Replicate"].mesh.tolist(), replicate_group[replicate_group_idx] ) # We support both UX for nD slicing. 
# mesh_3d[["Replicate", "Shard"]] or mesh_3d["Replicate", "Shard"] hsdp_mesh_1 = mesh_3d[["Replicate", "Shard"]] hsdp_mesh_2 = mesh_3d["Replicate", "Shard"] hsdp_group = [[[0, 2], [4, 6]], [[1, 3], [5, 7]]] hsdp_group_idx = self.rank % 2 self.assertEqual(hsdp_mesh_1.mesh.tolist(), hsdp_group[hsdp_group_idx]) self.assertEqual(hsdp_mesh_2.mesh.tolist(), hsdp_group[hsdp_group_idx]) self.assertEqual(hsdp_mesh_1, hsdp_mesh_2) @with_comms def test_cache_and_reuse_submesh_slice_result(self): mesh = init_device_mesh(self.device_type, (2, 4), mesh_dim_names=("dp", "tp")) dp_mesh = mesh["dp"] ref_pg_count = _world.group_count # When we call the "dp" slice second time, it should not create any new pg. # As we are just using the cached result so the pg count should be the same. dp_mesh_2 = mesh["dp"] self.assertEqual(ref_pg_count, _world.group_count) # When we call the "tp" slice, it should not create a new pg, as the "tp" slice would # just reuse the parent mesh pg. tp_mesh = mesh["tp"] self.assertEqual(_world.group_count, ref_pg_count) @with_comms def test_get_item_3d_noncontiguous_slicing(self): mesh_shape = (2, 2, 2) mesh_dim_names = ("dp", "pp", "cp") mesh_3d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) # Slice order simply decides which mesh_dim sits on which mesh_dim. # For dp_cp_mesh, cp mesh is the innermost dimension. dp_cp_mesh = mesh_3d["dp", "cp"] expected_mesh_tensor = ( torch.tensor([[0, 1], [4, 5]], dtype=torch.int) if self.rank in (0, 1, 4, 5) else torch.tensor([[2, 3], [6, 7]], dtype=torch.int) ) dp_local_rank = dp_cp_mesh.get_local_rank("dp") self.assertEqual(dp_cp_mesh.mesh, expected_mesh_tensor) cp_mesh = mesh_3d["cp"] # Check on the current dp_local_rank, whether the cp mesh tensor is the same. self.assertEqual(dp_cp_mesh.mesh[dp_local_rank], cp_mesh.mesh) with self.assertRaisesRegex( KeyError, "Invalid mesh_dim_names", ): cp_dp_mesh = mesh_3d["cp", "dp"] @with_comms def test_flatten_mesh(self): mesh_shape = (2, 2, 2) mesh_dim_names = ("dp", "cp", "tp") mesh_3d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) # Test flatten contiguous dims dp_cp_mesh = mesh_3d["dp", "cp"] flattened_dp_cp_mesh = dp_cp_mesh._flatten() self.assertEqual(dp_cp_mesh.mesh.flatten(), flattened_dp_cp_mesh.mesh) self.assertEqual(flattened_dp_cp_mesh.mesh_dim_names[0], "dp_cp") root_mesh = _mesh_resources.get_root_mesh(dp_cp_mesh) self.assertEqual(root_mesh, mesh_3d) flatten_mesh_root_dims = _mesh_resources.flatten_name_to_root_dims[root_mesh][ "dp_cp" ] self.assertEqual(flatten_mesh_root_dims, (0, 1)) ref_pg_count = _world.group_count # Calling flatten again should not create a new pg. 
flattened_dp_cp_mesh_2 = dp_cp_mesh._flatten() self.assertEqual(flattened_dp_cp_mesh, flattened_dp_cp_mesh_2) self.assertEqual(ref_pg_count, _world.group_count) # Test flatten non-contiguous dims dp_tp_mesh = mesh_3d["dp", "tp"] flattened_dp_tp_mesh = dp_tp_mesh._flatten() self.assertEqual(dp_tp_mesh.mesh.flatten(), flattened_dp_tp_mesh.mesh) self.assertEqual(flattened_dp_tp_mesh.mesh_dim_names[0], "dp_tp") root_mesh = _mesh_resources.get_root_mesh(dp_tp_mesh) self.assertEqual(root_mesh, mesh_3d) flatten_mesh_root_dims = _mesh_resources.flatten_name_to_root_dims[root_mesh][ "dp_tp" ] self.assertEqual(flatten_mesh_root_dims, (0, 2)) # Test flatten with a flattened mesh_dim_name cp_tp_mesh = mesh_3d["cp", "tp"] cp_tp_mesh._flatten("dummy") self.assertEqual(mesh_3d["dummy"].mesh_dim_names[0], "dummy") @with_comms def test_reconstruct_mesh_with_flatten_dim(self): mesh_3d = init_device_mesh( self.device_type, (2, 2, 2), mesh_dim_names=("replicate", "shard", "cp") ) shard_cp_mesh = mesh_3d["shard", "cp"]._flatten() hsdp_mesh = mesh_3d["replicate", "shard_cp"] expected_mesh_tensor = torch.tensor( [[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.int ) self.assertEqual(hsdp_mesh.mesh, expected_mesh_tensor) self.assertEqual(shard_cp_mesh.get_group(), mesh_3d["shard_cp"].get_group()) self.assertEqual( shard_cp_mesh.get_group(), mesh_3d.get_group(mesh_dim="shard_cp") ) mesh_3d = init_device_mesh( self.device_type, (2, 2, 2), mesh_dim_names=("dp", "cp", "tp") ) dp_cp_mesh = mesh_3d["dp", "cp"]._flatten() spmd_mesh = mesh_3d["dp_cp", "tp"] expected_mesh_tensor = torch.tensor( [[0, 1], [2, 3], [4, 5], [6, 7]], dtype=torch.int ) self.assertEqual(spmd_mesh.mesh, expected_mesh_tensor) self.assertEqual(dp_cp_mesh.get_group(), mesh_3d["dp_cp"].get_group()) self.assertEqual(dp_cp_mesh.get_group(), mesh_3d.get_group(mesh_dim="dp_cp")) class TestMeshEnv(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_get_root_mesh(self): mesh_3d = init_device_mesh( self.device_type, (2, 2, 2), mesh_dim_names=("dp", "cp", "tp") ) dp_cp_mesh = mesh_3d["dp", "cp"] dp_tp_mesh = mesh_3d["dp", "tp"] cp_tp_mesh = mesh_3d["cp", "tp"] dp_mesh = mesh_3d["dp"] cp_mesh = mesh_3d["cp"] tp_mesh = mesh_3d["tp"] self.assertEqual(_mesh_resources.get_root_mesh(dp_cp_mesh), mesh_3d) self.assertEqual(_mesh_resources.get_root_mesh(dp_tp_mesh), mesh_3d) self.assertEqual(_mesh_resources.get_root_mesh(cp_tp_mesh), mesh_3d) self.assertEqual(_mesh_resources.get_root_mesh(dp_mesh), mesh_3d) self.assertEqual(_mesh_resources.get_root_mesh(cp_mesh), mesh_3d) self.assertEqual(_mesh_resources.get_root_mesh(tp_mesh), mesh_3d) @with_comms def test_get_root_mesh_dim_exist(self): mesh_shape = (2, self.world_size // 2) mesh_dim_names = ("DP", "TP") mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) self.assertEqual(_mesh_resources.get_root_mesh_dim(mesh_2d["DP"]), 0) self.assertEqual(_mesh_resources.get_root_mesh_dim(mesh_2d["TP"]), 1) @with_comms def test_get_root_mesh_dim_not_exist(self): mesh_shape = (self.world_size,) mesh = init_device_mesh(self.device_type, mesh_shape) self.assertEqual(_mesh_resources.get_root_mesh_dim(mesh), None) @with_comms def test_get_mesh_dim_by_name(self): mesh_shape = (2, self.world_size // 2) mesh_dim_names = ("DP", "TP") mesh_2d = init_device_mesh( self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names ) self.assertEqual(_mesh_resources.get_mesh_dim_by_name(mesh_2d, "DP"), 0) self.assertEqual(_mesh_resources.get_mesh_dim_by_name(mesh_2d, "TP"), 1) @with_comms def 
test_get_all_submeshes(self): mesh_2d = init_device_mesh( self.device_type, (2, 4), mesh_dim_names=("replicate", "shard") ) all_submeshes = _mesh_resources._get_all_submeshes(mesh_2d, "replicate") self.assertEqual(len(all_submeshes), 4) self.assertEqual( all(submesh.mesh.numel() == 2 for submesh in all_submeshes), True ) class DeviceMeshCollectiveTest(DTensorTestBase): @property def world_size(self): return 8 @with_comms def test_broadcast_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank mesh_broadcast(local_tensor, mesh, mesh_dim=0) self.assertEqual(local_tensor, torch.zeros(3, 3)) @with_comms def test_scatter_1d(self): mesh = DeviceMesh(self.device_type, torch.arange(self.world_size)) scatter_tensor_shape = [3, 3, 3] for scatter_dim in range(len(scatter_tensor_shape)): shard_placement = Shard(scatter_dim) scatter_tensor_shape[scatter_dim] *= self.world_size # make the random seed same across rank torch.manual_seed(0) global_tensor = torch.randn(scatter_tensor_shape, device=self.device_type) splitted_list, _ = shard_placement._split_tensor( global_tensor, mesh.size(), with_padding=True, contiguous=True ) recv_tensor = torch.empty_like(splitted_list[mesh.get_rank()]) # scatter on dim > 0 would generate non-contiguous tensor, verify that works mesh_scatter(recv_tensor, splitted_list, mesh, mesh_dim=0) self.assertEqual(recv_tensor, splitted_list[mesh.get_rank()]) @with_comms def test_scatter_uneven(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() tensor_to_split = torch.randn( device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type ) for shard_dim in range(tensor_to_split.ndim): shard_placement = Shard(shard_dim) tensor_to_scatter = tensor_to_split.clone() tensor_splitted_list = list( torch.chunk(tensor_to_split, self.world_size, dim=shard_dim) ) for _ in range(self.world_size - len(tensor_splitted_list)): tensor_splitted_list.append(torch.tensor([], device=self.device_type)) padded_tensor_list, pad_sizes = shard_placement._split_tensor( tensor_to_scatter, device_mesh.size(), with_padding=True, contiguous=True, ) scattered_tensor = torch.empty_like(padded_tensor_list[my_rank]) mesh_scatter(scattered_tensor, padded_tensor_list, device_mesh, mesh_dim=0) if pad_sizes[my_rank] != 0: scattered_tensor = unpad_tensor( scattered_tensor, shard_dim, pad_sizes[my_rank] ) if scattered_tensor.numel() == 0: # We need to check numel() instead of size if a tensor is ([]) after unpadding, # since the size could be ([0, 8]) after unpadding. 
self.assertEqual( scattered_tensor.numel(), tensor_splitted_list[my_rank].numel() ) else: self.assertEqual( scattered_tensor.size(), tensor_splitted_list[my_rank].size() ) self.assertEqual(scattered_tensor, tensor_splitted_list[my_rank]) @with_comms def test_all_gather_uneven(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() tensor_to_split = torch.ones( device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type, ) for shard_dim in range(tensor_to_split.ndim): shard_placement = Shard(shard_dim) tensor_padded_list, pad_sizes = shard_placement._split_tensor( tensor_to_split, device_mesh.size(), with_padding=True, contiguous=True, ) local_tensor = tensor_padded_list[my_rank] big_tensor = funcol.all_gather_tensor( local_tensor, gather_dim=shard_dim, group=(device_mesh, 0) ) big_tensor_chunks = list( torch.chunk(big_tensor, device_mesh.size(), dim=shard_dim) ) unpadded_list = [ ( unpad_tensor(big_tensor, shard_dim, pad_sizes[i]) if pad_sizes[i] > 0 else big_tensor ) for i, big_tensor in enumerate(big_tensor_chunks) ] all_gathered_tensor = torch.cat(unpadded_list, dim=shard_dim) self.assertEqual(all_gathered_tensor.size(), tensor_to_split.size()) self.assertEqual(all_gathered_tensor, tensor_to_split) @with_comms def test_reduce_scatter_contiguous(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() # Init the tensor step = self.world_size * 2 total_elem = step**2 tensor = torch.arange(0, total_elem).view(step, -1).to(device=self.device_type) tensor = tensor * (my_rank + 1) # Get non-contiguous tensor by slicing tensor_to_reduce = tensor[::2, :2] tensor_contiguous = tensor_to_reduce.clone().contiguous() # Partial to Shard to trigger reduce_scatter tensor_to_reduce = DTensor.from_local( tensor_to_reduce, device_mesh, [_Partial()] ) tensor_contiguous = DTensor.from_local( tensor_contiguous, device_mesh, [_Partial()] ) new_tensor = tensor_to_reduce.redistribute(device_mesh, [Shard(0)]) new_tensor_contiguous = tensor_contiguous.redistribute(device_mesh, [Shard(0)]) # The output for contiguous and non-contiguous tensors of the same value # should return the same reducescatter value. 
new_tensor_local = new_tensor._local_tensor new_tensor_contiguous_local = new_tensor_contiguous._local_tensor self.assertEqual(new_tensor_local, new_tensor_contiguous_local) self.assertEqual(list(new_tensor_local.size()), [1, 2]) # Check the reduce numerical value sum_base = (1 + self.world_size) * self.world_size / 2 first_elem = my_rank * sum_base * step * 2 expected_tensor = torch.tensor( [[first_elem, first_elem + sum_base]], dtype=new_tensor_local.dtype, device=self.device_type, ) self.assertEqual(new_tensor_local, expected_tensor) @with_comms def test_reduce_scatter_uneven(self): device_mesh = DeviceMesh(self.device_type, list(range(self.world_size))) my_rank = device_mesh.get_rank() tensor_to_split = ( torch.ones( device_mesh.size() + 3, device_mesh.size() + 1, device=self.device_type, ) * self.rank ) for shard_dim in range(tensor_to_split.ndim): shard_placement = Shard(shard_dim) tensor_to_scatter = tensor_to_split.clone() tensor_splitted_list = list( torch.chunk(tensor_to_split, self.world_size, dim=shard_dim) ) for _ in range(self.world_size - len(tensor_splitted_list)): tensor_splitted_list.append(torch.tensor([], device=self.device_type)) padded_tensor_list, pad_sizes = shard_placement._split_tensor( tensor_to_scatter, device_mesh.size(), with_padding=True, contiguous=True, ) tensor_to_reduce = torch.cat(padded_tensor_list, shard_dim) res_num = ((0 + self.world_size - 1) * self.world_size) / 2 scattered_tensor = funcol.reduce_scatter_tensor( tensor_to_reduce, reduceOp="sum", scatter_dim=shard_dim, group=(device_mesh, 0), ) # unpad scattered_tensor if pad_sizes[my_rank] > 0: scattered_tensor = unpad_tensor( scattered_tensor, shard_dim, pad_sizes[my_rank] ) if scattered_tensor.numel() == 0: # We need to check numel() instead of size if a tensor is ([]) after unpadding, # since the size could be ([0, 8]) after unpadding. self.assertEqual( scattered_tensor.numel(), tensor_splitted_list[my_rank].numel() ) else: self.assertEqual( scattered_tensor.size(), tensor_splitted_list[my_rank].size() ) self.assertEqual( scattered_tensor, torch.ones_like(tensor_splitted_list[my_rank]) * res_num, ) @with_comms def test_broadcast_nd(self): mesh_tensor = torch.arange(8).reshape(2, 2, 2) mesh = DeviceMesh(self.device_type, mesh_tensor) local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank # check all dim groups dim_to_subgroups = mesh.get_all_groups() for dim, dim_group in enumerate(dim_to_subgroups): dim_group_size = get_world_size(dim_group) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] cloned_local_tensor = local_tensor.clone() mesh_broadcast(cloned_local_tensor, mesh, mesh_dim=dim) res_num = global_ranks[0] self.assertEqual(cloned_local_tensor, torch.ones(3, 3) * res_num) @with_comms def test_scatter_nd(self): mesh_tensor = torch.arange(8).reshape(2, 2, 2) mesh = DeviceMesh(self.device_type, mesh_tensor) # check all dim groups dim_to_subgroups = mesh.get_all_groups() for dim, dim_group in enumerate(dim_to_subgroups): dim_group_size = get_world_size(dim_group) global_ranks = [ get_global_rank(dim_group, i) for i in range(dim_group_size) ] scattered_tensors = [ torch.ones(3, 3, device=self.device_type) * global_rank for global_rank in global_ranks ] received_tensor = torch.empty_like( scattered_tensors[mesh.get_coordinate()[dim]] ) mesh_scatter(received_tensor, scattered_tensors, mesh, mesh_dim=dim) self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank) if __name__ == "__main__": run_tests()
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
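The record above exercises the DeviceMesh API: constructing an N-D mesh with named dimensions, slicing submeshes by name, and querying the local rank along a dimension. Below is a minimal sketch of that API, not taken from the record itself; it assumes the script is launched across 8 ranks (for example with torchrun --nproc_per_node=8) using the gloo backend, and the dimension names are illustrative.

import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh

# Rank and world size come from the torchrun-provided environment variables.
dist.init_process_group("gloo")

mesh_3d = init_device_mesh("cpu", (2, 2, 2), mesh_dim_names=("dp", "pp", "tp"))
tp_mesh = mesh_3d["tp"]           # 1D submesh along the "tp" dimension
dp_pp_mesh = mesh_3d["dp", "pp"]  # 2D submesh; slice order decides the dim order
print(
    dist.get_rank(),
    tp_mesh.mesh.tolist(),
    mesh_3d.get_local_rank("tp"),
    mesh_3d.get_local_rank("pp"),
)

dist.destroy_process_group()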
torch
test/distributed/test_device_mesh.py
test_device_mesh_reuse_default_group
def test_device_mesh_reuse_default_group(self):
    mesh = init_device_mesh(self.device_type, (self.world_size,))
    mesh_group = mesh.get_group()
    default_group = _get_default_group()
    if torch.cuda.is_available():
        self.assertNotEqual(mesh_group, default_group)
        self.assertEqual(get_world_size(mesh_group), get_world_size(default_group))
    else:
        self.assertEqual(mesh_group, default_group)
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTestGlooBackend(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
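A sketch of what this added test checks: a 1D mesh spanning the whole world either reuses the default process group or builds a new one, depending on whether CUDA is available. The sketch assumes a torchrun launch (any process count) with the gloo backend and only reports what the current build does.

import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.distributed_c10d import _get_default_group

dist.init_process_group("gloo")  # rank/world size supplied by torchrun

mesh = init_device_mesh("cpu", (dist.get_world_size(),))
mesh_group = mesh.get_group()
default_group = _get_default_group()
# The test above expects these to differ when CUDA is available and to match otherwise.
print(mesh_group is default_group, dist.get_world_size(mesh_group))

dist.destroy_process_group()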
torch
test/distributed/test_device_mesh.py
world_size
def world_size(self): return 4
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_data_parallel.py
__init__
def __init__(self, t):
    super().__init__()
    self.register_buffer('t_rg', t)
    self.register_buffer('t_not_rg', t.clone().detach())
def __init__(self, t):
    super().__init__()
    self.t_rg = nn.Buffer(t)
    self.t_not_rg = nn.Buffer(t.clone().detach())
import contextlib import io from copy import deepcopy from collections import OrderedDict from itertools import product import functools import torch from torch import nn from torch.cuda.amp import autocast import torch.nn.parallel as dp from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes, onlyCUDA, skipMeta from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck from torch.testing._internal.common_utils import dtype2prec_DONTUSE from torch.testing._internal.common_utils import sandcastle_skip_if import torch.nn.functional as F NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial(_assertGradAndGradgradChecks, check_batched_grad=False) class TestModule(nn.Module): import gc
import contextlib import functools import io from collections import OrderedDict from copy import deepcopy from itertools import product import torch import torch.nn.functional as F import torch.nn.parallel as dp from torch import nn from torch.cuda.amp import autocast from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, onlyCUDA, skipMeta, ) from torch.testing._internal.common_utils import ( _assertGradAndGradgradChecks, dtype2prec_DONTUSE, gradcheck, run_tests, skip_but_pass_in_sandcastle_if, TestCase, ) NO_NCCL = not hasattr(torch.distributed, "ProcessGroupNCCL") gradcheck = functools.partial(gradcheck, check_batched_grad=False) _assertGradAndGradgradChecks = functools.partial( _assertGradAndGradgradChecks, check_batched_grad=False ) class TestModule(nn.Module): import gc
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
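The modified record swaps register_buffer() calls for direct nn.Buffer assignment. A standalone sketch of the two styles follows; the module names are invented for illustration, and it assumes a PyTorch build recent enough to ship nn.Buffer (which the "after" side of the record requires).

import torch
from torch import nn

class WithRegisterBuffer(nn.Module):
    def __init__(self, t):
        super().__init__()
        # "before" style: explicit register_buffer() calls
        self.register_buffer("t_rg", t)
        self.register_buffer("t_not_rg", t.clone().detach())

class WithBufferClass(nn.Module):
    def __init__(self, t):
        super().__init__()
        # "after" style: nn.Buffer registers the tensor via plain attribute assignment
        self.t_rg = nn.Buffer(t)
        self.t_not_rg = nn.Buffer(t.clone().detach())

t = torch.randn(3, requires_grad=True)
a, b = WithRegisterBuffer(t), WithBufferClass(t)
# Both modules expose the same buffer names to named_buffers()/state_dict().
print(sorted(n for n, _ in a.named_buffers()), sorted(n for n, _ in b.named_buffers()))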
torch
test/distributed/test_device_mesh.py
test_from_group_with_mesh_shape
def test_from_group_with_mesh_shape(self):
    """Tests ``from_group`` when passing ``mesh_shape`` as 2D."""
    # Consider two different logical views of the same mesh:
    # - (4, 2) ("dp", "tp") mesh
    # - (2, 2, 2) ("dp_replicate", "dp_shard", "tp") mesh
    mesh_shape = (2, 2, 2)
    mesh_dim_names = ("dp_replicate", "dp_shard", "tp")
    ref_mesh = init_device_mesh(
        self.device_type, mesh_shape, mesh_dim_names=mesh_dim_names
    )
    dp_shard_group = ref_mesh["dp_shard"].get_group()
    dp_replicate_group = ref_mesh["dp_replicate"].get_group()
    dp_mesh = DeviceMesh.from_group(
        [dp_replicate_group, dp_shard_group],
        self.device_type,
        mesh=ref_mesh.mesh[:, :, ref_mesh.get_local_rank(2)],
        mesh_dim_names=mesh_dim_names[:2],
    )
    ref_mesh_dp_dim_group_infos = ref_mesh._dim_group_infos[:2]
    for (_, ref_ranks, _), (_, ranks, _) in zip(
        ref_mesh_dp_dim_group_infos, dp_mesh._dim_group_infos
    ):
        self.assertEqual(ref_ranks, ranks)
    # Cannot check directly for mesh equality since parent meshes are not
    # the same since the ref's parent mesh is 3D
    self.assertEqual(dp_mesh["dp_replicate"].mesh, ref_mesh["dp_replicate"].mesh)
    for (_, ref_ranks, _), (_, ranks, _) in zip(
        dp_mesh["dp_replicate"]._dim_group_infos,
        ref_mesh["dp_replicate"]._dim_group_infos,
    ):
        self.assertEqual(ref_ranks, ranks)
    self.assertEqual(dp_mesh["dp_shard"].mesh, ref_mesh["dp_shard"].mesh)
    for (_, ref_ranks, _), (_, ranks, _) in zip(
        dp_mesh["dp_shard"]._dim_group_infos, ref_mesh["dp_shard"]._dim_group_infos
    ):
        self.assertEqual(ref_ranks, ranks)
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTestNDim(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
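The added test rebuilds a 2D data-parallel mesh out of process groups that already exist on a 3D reference mesh. A condensed sketch of the same call, assuming a torchrun launch with 8 ranks and the gloo backend:

import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh

dist.init_process_group("gloo")  # 8 ranks assumed

ref_mesh = init_device_mesh(
    "cpu", (2, 2, 2), mesh_dim_names=("dp_replicate", "dp_shard", "tp")
)
# Reuse the replicate/shard groups to build a 2D mesh that drops the "tp" dimension.
dp_mesh = DeviceMesh.from_group(
    [ref_mesh["dp_replicate"].get_group(), ref_mesh["dp_shard"].get_group()],
    "cpu",
    mesh=ref_mesh.mesh[:, :, ref_mesh.get_local_rank("tp")],
    mesh_dim_names=("dp_replicate", "dp_shard"),
)
print(dist.get_rank(), dp_mesh.mesh.tolist())

dist.destroy_process_group()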
torch
test/distributed/test_device_mesh.py
test_raises_mesh_shape_mesh_dim_names_mismatch
def test_raises_mesh_shape_mesh_dim_names_mismatch(self):
    with self.assertRaisesRegex(
        RuntimeError,
        "mesh_shape and mesh_dim_names should have same length!",
    ):
        mesh = init_device_mesh(
            self.device_type,
            (8,),
            mesh_dim_names=["dp", "tp"],
        )
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class InitDeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
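A quick sketch of the validation this test relies on, under the same torchrun/gloo assumption as the earlier sketches (8 ranks):

import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh

dist.init_process_group("gloo")

try:
    # One mesh dimension but two names is rejected before any group is created.
    init_device_mesh("cpu", (8,), mesh_dim_names=["dp", "tp"])
except RuntimeError as err:
    print(err)  # mesh_shape and mesh_dim_names should have same length!

dist.destroy_process_group()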
torch
test/distributed/test_device_mesh.py
test_reconstruct_mesh_with_flatten_dim
def test_reconstruct_mesh_with_flatten_dim(self):
    mesh_3d = init_device_mesh(
        self.device_type, (2, 2, 2), mesh_dim_names=("replicate", "shard", "cp")
    )
    shard_cp_mesh = mesh_3d["shard", "cp"]._flatten()
    hsdp_mesh = mesh_3d["replicate", "shard_cp"]
    expected_mesh_tensor = torch.tensor(
        [[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.int
    )
    self.assertEqual(hsdp_mesh.mesh, expected_mesh_tensor)
    self.assertEqual(shard_cp_mesh.get_group(), mesh_3d["shard_cp"].get_group())
    self.assertEqual(
        shard_cp_mesh.get_group(), mesh_3d.get_group(mesh_dim="shard_cp")
    )

    mesh_3d = init_device_mesh(
        self.device_type, (2, 2, 2), mesh_dim_names=("dp", "cp", "tp")
    )
    dp_cp_mesh = mesh_3d["dp", "cp"]._flatten()
    spmd_mesh = mesh_3d["dp_cp", "tp"]
    expected_mesh_tensor = torch.tensor(
        [[0, 1], [2, 3], [4, 5], [6, 7]], dtype=torch.int
    )
    self.assertEqual(spmd_mesh.mesh, expected_mesh_tensor)
    self.assertEqual(dp_cp_mesh.get_group(), mesh_3d["dp_cp"].get_group())
    self.assertEqual(dp_cp_mesh.get_group(), mesh_3d.get_group(mesh_dim="dp_cp"))
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class TestDeviceMeshGetItem(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
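The added test uses the private DeviceMesh._flatten() helper to collapse two sliced dimensions into a single named dimension that can then be sliced from the root mesh. A compact sketch under the same 8-rank torchrun/gloo assumption (note that _flatten is internal API, shown here only because the test exercises it):

import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh

dist.init_process_group("gloo")  # 8 ranks assumed

mesh_3d = init_device_mesh(
    "cpu", (2, 2, 2), mesh_dim_names=("replicate", "shard", "cp")
)
shard_cp_mesh = mesh_3d["shard", "cp"]._flatten()  # creates a "shard_cp" dim
hsdp_mesh = mesh_3d["replicate", "shard_cp"]       # HSDP view built from the flattened dim
print(dist.get_rank(), hsdp_mesh.mesh.tolist())

dist.destroy_process_group()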
torch
test/distributed/test_device_mesh.py
test_device_mesh_init_backend
def test_device_mesh_init_backend(self):
    mesh = DeviceMesh(self.device_type, [1], _init_backend=False)

    with self.assertRaisesRegex(RuntimeError, "process groups not initialized!"):
        mesh.get_group()

    # coordinates should always been populated when init_backend is False, as whenever
    # we call init_backend we should make sure the default pg already created
    mesh.get_coordinate()
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
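A sketch of the _init_backend=False path this test covers: the mesh object is built without creating per-dimension process groups, so get_group() raises, while the rank coordinate is still read from the already initialized default group. This is a single-process sketch; the TCP port is arbitrary and _init_backend is a private keyword shown in the test above.

import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh

# A one-rank default group is enough; DeviceMesh still reads the rank from it.
dist.init_process_group(
    "gloo", init_method="tcp://127.0.0.1:29511", rank=0, world_size=1
)

mesh = DeviceMesh("cpu", [0], _init_backend=False)
try:
    mesh.get_group()
except RuntimeError as err:
    print(err)  # process groups not initialized!
print(mesh.get_coordinate())

dist.destroy_process_group()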
torch
test/distributed/test_device_mesh.py
test_from_group_with_invalid_mesh
def test_from_group_with_invalid_mesh(self):
    global_pg = _get_default_group()
    global_pg_size = global_pg.size()
    assert global_pg_size == 4, "Test assumes global world size of 4"

    invalid_mesh = [[0, 1], [2, 3]]  # 2D mesh when we need 1D
    regex = r"Invalid mesh \[\[0, 1\], \[2, 3\]\] for ProcessGroup with ranks \[0, 1, 2, 3\]"
    with self.assertRaisesRegex(ValueError, regex):
        DeviceMesh.from_group(global_pg, "cuda", invalid_mesh)

    device_mesh = init_device_mesh(self.device_type, (2, 2))
    groups = device_mesh.get_all_groups()
    invalid_mesh = (0, 1, 2, 3)  # 1D mesh when we need 2D
    regex = r"Expects mesh with ndim equal to number of ProcessGroups but got mesh \[0, 1, 2, 3\] and 2 ProcessGroups"
    with self.assertRaisesRegex(ValueError, regex):
        DeviceMesh.from_group(groups, self.device_type, invalid_mesh)
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_device_mesh.py
test_set_mesh_dim_group_options
def test_set_mesh_dim_group_options(self):
    device_type = "cuda" if torch.cuda.is_available() else "cpu"
    _mesh_resources._set_mesh_dim_group_options(1, "fake", None)

    mesh_tensor = torch.arange(4).reshape(2, 2)
    mesh = DeviceMesh(device_type, mesh_tensor)
    self.assertEqual(mesh.get_group(1)._get_backend_name(), "fake")
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_device_mesh.py
test_get_all_submeshes
def test_get_all_submeshes(self):
    mesh_2d = init_device_mesh(
        self.device_type, (2, 4), mesh_dim_names=("replicate", "shard")
    )
    all_submeshes = _mesh_resources._get_all_submeshes(mesh_2d, "replicate")
    self.assertEqual(len(all_submeshes), 4)
    self.assertEqual(
        all(submesh.mesh.numel() == 2 for submesh in all_submeshes), True
    )
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class TestMeshEnv(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_device_mesh.py
test_scatter_nd
def test_scatter_nd(self):
    mesh_tensor = torch.arange(8).reshape(2, 2, 2)
    mesh = DeviceMesh(self.device_type, mesh_tensor)

    # check all dim groups
    dim_to_subgroups = mesh.get_all_groups()
    for dim, dim_group in enumerate(dim_to_subgroups):
        dim_group_size = get_world_size(dim_group)
        global_ranks = [
            get_global_rank(dim_group, i) for i in range(dim_group_size)
        ]
        scattered_tensors = [
            torch.ones(3, 3, device=self.device_type) * global_rank
            for global_rank in global_ranks
        ]
        received_tensor = torch.empty_like(
            scattered_tensors[mesh.get_coordinate()[dim]]
        )
        mesh_scatter(received_tensor, scattered_tensors, mesh, mesh_dim=dim)
        self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)
import os import torch import torch.distributed._functional_collectives as funcol from torch.distributed._tensor import DTensor from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh from torch.distributed.distributed_c10d import ( _get_default_group, _world, get_global_rank, get_world_size, init_process_group, is_initialized, is_nccl_available, ProcessGroup, ) from torch.distributed.tensor._collective_utils import ( mesh_broadcast, mesh_scatter, unpad_tensor, ) from torch.distributed.tensor.placement_types import _Partial, Shard from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, with_comms, ) from torch.testing._internal.distributed.fake_pg import FakeStore class DeviceMeshCollectiveTest(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
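The collective test above drives the internal mesh_scatter helper over every dimension of a 2x2x2 mesh. A trimmed sketch of one scatter along dim 0 follows, again assuming torchrun with 8 ranks and the gloo backend; mesh_scatter lives in a private module, as the record's imports show.

import torch
import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.tensor._collective_utils import mesh_scatter

dist.init_process_group("gloo")  # 8 ranks assumed

mesh = DeviceMesh("cpu", torch.arange(8).reshape(2, 2, 2))
dim = 0
group = mesh.get_all_groups()[dim]
ranks = [dist.get_global_rank(group, i) for i in range(dist.get_world_size(group))]

# One 3x3 tensor per rank in the dim-0 subgroup, filled with that rank's global id.
inputs = [torch.full((3, 3), float(r)) for r in ranks]
out = torch.empty(3, 3)
mesh_scatter(out, inputs, mesh, mesh_dim=dim)
print(dist.get_rank(), out[0, 0].item())  # each rank receives the tensor carrying its own id

dist.destroy_process_group()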
torch
test/distributed/test_dynamo_distributed.py
init_weights
def init_weights(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)


class ToyModel(nn.Module):
    def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
        super().__init__()
        self.net = nn.Sequential(
            *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
        )

    def forward(self, inputs):
        return self.net(inputs)
def init_weights(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)


class ToyModel(nn.Module):
    def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
        super().__init__()
        self.ctx_manager = ctx_manager
        self.net = nn.Sequential(
            *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
        )

    def forward(self, inputs):
        if self.ctx_manager is not None:
            with self.ctx_manager():
                return self.net(inputs)
        else:
            return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
get_model
def get_model(device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5):
    m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat).to(device)
    m.apply(init_weights)
    inputs = torch.rand(bsz, in_feat).to(device)
    outputs = m(inputs)
    return m, inputs, outputs
def get_model(
    device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None
):
    m = ToyModel(
        in_feat=in_feat,
        hidden_feat=hidden_feat,
        out_feat=out_feat,
        ctx_manager=ctx_manager,
    ).to(device)
    m.apply(init_weights)
    inputs = torch.rand(bsz, in_feat).to(device)
    outputs = m(inputs)
    return m, inputs, outputs


class MutatingModel(nn.Module):
    def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
        super().__init__()
        self.ctx_manager = ctx_manager
        self.net = nn.Sequential(
            *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
            + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
        )
        self.state = 1

    def forward(self, inputs):
        self.state = 2
        return self.net(inputs) * self.state
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
get_toy_model_for_activation_checkpointing
def get_toy_model_for_activation_checkpointing(device):
    m = ToyOuterModel(device).to(device)
    m.apply(init_weights)
    inputs = torch.rand(100, 100).to(device)
    return m, inputs
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
find_first_node
def find_first_node(gm, func):
    for node in gm.graph.nodes:
        if node.target is func:
            return node
    return None
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
get_custom_model
def get_custom_model(device):
    class MyCustomLinear(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = nn.Parameter(torch.randn(512, 512))

        def forward(self, x):
            tmp = torch.mm(x, self.weight.t())
            # test an edge case where torch.where.scalar was decomposed to aten.where.self(tensor, tensor, tensor)
            # and the tensors T(0.4) and T(0.5) were not wrapped in FakeTensors during DDPOptimizer compilation
            return tmp + torch.where(tmp < 0.5, 0.3, 0.6)

    class MyLinear(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(512, 512)

        def forward(self, x):
            return self.linear(x)

    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            mods = [
                (MyLinear(), torch.nn.ReLU()),
                # sandwich the custom in the middle so it comes before and after
                (MyCustomLinear(), torch.nn.ReLU()),
                (MyLinear(), torch.nn.ReLU()),
            ]
            self.seq = torch.nn.Sequential(*[x for items in mods for x in items])

        def forward(self, x, y):
            # test special case where the 0th bucket (layers close to graph input) is at capacity, which would
            # trigger a new bucket, but there are only trivial ops without parameters to put into the new bucket.
            # optimize this case by fusing that 'empty bucket' back together with the previous full one
            return self.seq(x + y)

    m = MyModule().to(device)
    m.apply(init_weights)
    inputs = torch.rand((512, 512)).to(device)
    # test duplicated inputs
    inputs = (inputs, inputs)
    correct_outputs = m(*inputs)
    return m, inputs, correct_outputs
def get_custom_model(device):
    class MyCustomLinear(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.weight = nn.Parameter(torch.randn(512, 512))

        def forward(self, x):
            tmp = torch.mm(x, self.weight.t())
            # test an edge case where torch.where.scalar was decomposed to aten.where.self(tensor, tensor, tensor)
            # and the tensors T(0.4) and T(0.5) were not wrapped in FakeTensors during DDPOptimizer compilation
            return tmp + torch.where(tmp < 0.5, 0.3, 0.6)

    class MyLinear(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(512, 512)

        def forward(self, x):
            return self.linear(x)

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            mods = [
                (MyLinear(), torch.nn.ReLU()),
                # sandwich the custom in the middle so it comes before and after
                (MyCustomLinear(), torch.nn.ReLU()),
                (MyLinear(), torch.nn.ReLU()),
            ]
            self.seq = torch.nn.Sequential(*[x for items in mods for x in items])

        def forward(self, x, y):
            # test special case where the 0th bucket (layers close to graph input) is at capacity, which would
            # trigger a new bucket, but there are only trivial ops without parameters to put into the new bucket.
            # optimize this case by fusing that 'empty bucket' back together with the previous full one
            return self.seq(x + y)

    m = MyModule().to(device)
    m.apply(init_weights)
    inputs = torch.rand((512, 512)).to(device)
    # test duplicated inputs
    inputs = (inputs, inputs)
    correct_outputs = m(*inputs)
    return m, inputs, correct_outputs
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy
import functools
import random
import unittest
from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo
from torch._dynamo.backends.distributed import DDPOptimizer
import torch._dynamo.test_case
from contextlib import contextmanager
from torch import nn
from torch._dynamo import config
from torch._dynamo.utils import same
from torch._dynamo.testing import collect_results
from torch._inductor.utils import has_triton
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import (
    DynamoDistributedSingleProcTestCase,
    DynamoDistributedMultiProcTestCase,
    import_transformers_or_skip,
    skip_if_lt_x_gpu,
    requires_nccl,
    _dynamo_dist_per_rank_init,
)
import torch._dynamo.logging
class ToyModel(nn.Module):
from transformers import BertConfig, AutoModelForMaskedLM
from transformers.models.bert.modeling_bert import BertLayer
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch._dynamo.test_case import run_tests
import contextlib
import copy
import functools
import random
import unittest
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from typing import List
from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo
import torch._dynamo.logging
import torch._dynamo.test_case
import torch.distributed as dist
import torch.optim as optim
from torch import nn
from torch._C import FileCheck
from torch._dynamo import config
from torch._dynamo.backends.distributed import DDPOptimizer
from torch._dynamo.comptime import comptime
from torch._dynamo.testing import collect_results
from torch._dynamo.utils import same
from torch._higher_order_ops.wrap import tag_activation_checkpoint
from torch.distributed._functional_collectives import _maybe_wrap_tensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import (
    lambda_auto_wrap_policy,
    transformer_auto_wrap_policy,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import (
    PLATFORM_SUPPORTS_FLASH_ATTENTION,
    PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
)
from torch.testing._internal.common_distributed import (
    _dynamo_dist_per_rank_init,
    DynamoDistributedMultiProcTestCase,
    DynamoDistributedSingleProcTestCase,
    import_transformers_or_skip,
    requires_nccl,
    skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import requires_cuda
from torch.utils._triton import has_triton
class ToyModel(nn.Module):
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
    checkpoint_wrapper,
    CheckpointImpl,
)
from transformers import AutoModelForMaskedLM, BertConfig
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
    checkpoint_wrapper,
    CheckpointImpl,
)
from torch._dynamo.utils import counters
from transformers.models.bert.modeling_bert import BertLayer
from torch._dynamo.comptime import comptime
from torch._dynamo.utils import counters
from torch._inductor.utils import fresh_inductor_cache
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.checkpoint import checkpoint
from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
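For readability, the following is a minimal self-contained sketch of what the updated forward does; it is not the test file itself. TinyToyModel is a trimmed-down illustrative stand-in for ToyModel, and torch.no_grad is used only as an example context manager (the row above does not show which context manager the tests actually pass).

# Minimal sketch, not the test file: a trimmed model whose forward
# conditionally runs under a caller-supplied context-manager factory.
import torch
from torch import nn


class TinyToyModel(nn.Module):
    def __init__(self, ctx_manager=None):
        super().__init__()
        # A callable that returns a context manager, or None for plain forward.
        self.ctx_manager = ctx_manager
        self.net = nn.Sequential(nn.Linear(10, 16), nn.ReLU(), nn.Linear(16, 5))

    def forward(self, inputs):
        # Mirrors the "after" forward above: enter a fresh context per call.
        if self.ctx_manager is not None:
            with self.ctx_manager():
                return self.net(inputs)
        return self.net(inputs)


# torch.no_grad is only an illustrative stand-in for the real context manager.
model = TinyToyModel(ctx_manager=torch.no_grad)
out = model(torch.randn(4, 10))
assert not out.requires_grad  # confirms the forward pass ran inside no_grad()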
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy
import functools
import random
import unittest
from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo
from torch._dynamo.backends.distributed import DDPOptimizer
import torch._dynamo.test_case
from contextlib import contextmanager
from torch import nn
from torch._dynamo import config
from torch._dynamo.utils import same
from torch._dynamo.testing import collect_results
from torch._inductor.utils import has_triton
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import (
    DynamoDistributedSingleProcTestCase,
    DynamoDistributedMultiProcTestCase,
    import_transformers_or_skip,
    skip_if_lt_x_gpu,
    requires_nccl,
    _dynamo_dist_per_rank_init,
)
import torch._dynamo.logging
class ToyModel(nn.Module):
from transformers import BertConfig, AutoModelForMaskedLM
from transformers.models.bert.modeling_bert import BertLayer
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch._dynamo.test_case import run_tests
import contextlib
import copy
import functools
import random
import unittest
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from typing import List
from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo
import torch._dynamo.logging
import torch._dynamo.test_case
import torch.distributed as dist
import torch.optim as optim
from torch import nn
from torch._C import FileCheck
from torch._dynamo import config
from torch._dynamo.backends.distributed import DDPOptimizer
from torch._dynamo.comptime import comptime
from torch._dynamo.testing import collect_results
from torch._dynamo.utils import same
from torch._higher_order_ops.wrap import tag_activation_checkpoint
from torch.distributed._functional_collectives import _maybe_wrap_tensor
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import (
    lambda_auto_wrap_policy,
    transformer_auto_wrap_policy,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import (
    PLATFORM_SUPPORTS_FLASH_ATTENTION,
    PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
)
from torch.testing._internal.common_distributed import (
    _dynamo_dist_per_rank_init,
    DynamoDistributedMultiProcTestCase,
    DynamoDistributedSingleProcTestCase,
    import_transformers_or_skip,
    requires_nccl,
    skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import requires_cuda
from torch.utils._triton import has_triton
class ToyModel(nn.Module):
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
    checkpoint_wrapper,
    CheckpointImpl,
)
from transformers import AutoModelForMaskedLM, BertConfig
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
    checkpoint_wrapper,
    CheckpointImpl,
)
from torch._dynamo.utils import counters
from transformers.models.bert.modeling_bert import BertLayer
from torch._dynamo.comptime import comptime
from torch._dynamo.utils import counters
from torch._inductor.utils import fresh_inductor_cache
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.checkpoint import checkpoint
from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
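The constructor change pairs with the forward change above: the model stores a context-manager factory rather than an already-entered context, so a fresh context is created on every forward call. A hedged usage note follows, reusing the TinyToyModel sketch from earlier; the names and the choice of torch.no_grad are illustrative, not taken from the test.

# Assumes the TinyToyModel sketch above is in scope.
# Omitting the factory preserves the pre-change behaviour; supplying one
# wraps every forward call in a newly constructed context manager.
plain = TinyToyModel()                                # behaves like the old ToyModel
wrapped = TinyToyModel(ctx_manager=torch.no_grad)     # every call runs under no_grad()

x = torch.randn(2, 10)
assert plain(x).requires_grad        # normal autograd-tracked forward
assert not wrapped(x).requires_grad  # forward executed inside no_grad()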