library
stringclasses 1
value | test_file
stringclasses 785
values | test_function
stringlengths 1
295
| before
stringlengths 0
448k
| after
stringlengths 0
487k
| context_before
stringclasses 947
values | context_after
stringlengths 0
16.3k
| commit_before
stringclasses 1
value | commit_after
stringclasses 1
value | change_type
stringclasses 3
values |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
test_is_backend_available
|
def test_is_backend_available(self):
self.assertEqual(dist.is_ucc_available(), dist.is_backend_available("ucc"))
self.assertFalse(dist.is_backend_available("dummy"))
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
self.assertTrue(dist.is_backend_available("dummy"))
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
test_backend_config
|
def test_backend_config(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
# Ensure backend config can be created with the following arguments
backend_config_strings_and_expected_values = [
(dist.Backend.GLOO, "cpu:gloo,cuda:gloo"),
(dist.Backend.NCCL, "cuda:nccl"),
(dist.Backend.MPI, "cpu:mpi,cuda:mpi"),
(dist.Backend.UCC, "cpu:ucc,cuda:ucc"),
(dist.Backend.DUMMY, "cpu:dummy,cuda:dummy"),
("DUMMY", "cpu:dummy,cuda:dummy"),
("dummy", "cpu:dummy,cuda:dummy"),
("cpu:dummy,cuda:dummy", "cpu:dummy,cuda:dummy"),
("cpu:dummy,cuda:nccl", "cpu:dummy,cuda:nccl"),
("cpu:gloo,cuda:dummy", "cpu:gloo,cuda:dummy"),
("cpu:gloo,cuda:nccl", "cpu:gloo,cuda:nccl"),
]
for config_str, expected_value in backend_config_strings_and_expected_values:
with self.subTest(config_str):
# ensures these configs strings are valid and no ValueError is raised
config = dist.BackendConfig(config_str)
self.assertEqual(str(config), expected_value)
# Ensure backend config will raise ValueError with the following arguments
invalid_backend_config_strings = [
"cpu:gloo,cuda:nccl,", # trailing comma
"cpu:gloo,cuda:nccl,cpu:dummy", # duplicate device
]
for config_str in invalid_backend_config_strings:
with self.subTest(config_str):
with self.assertRaises(ValueError):
dist.BackendConfig(config_str)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
_test_work_wait
|
def _test_work_wait(self, x: torch.Tensor, comm_fn: Callable):
pg = self._get_default_group()
def fn(x: torch.Tensor) -> torch.Tensor:
# N.B.: explicitly wrapping with CommTensor instead of updating
# all_reduce Python implementation, as the later will need more
# discussion.
y = CommTensor(x + x)
work, z = comm_fn(y, group=pg)
# this wait() will be ignored in tracing mode as
# ProxyTorchDispatchMode only supports torch.Tensor, _ProxyTensor,
# and torch.nn.Parameter objects
work.wait()
if isinstance(z, list):
return [zz * 2 for zz in z]
elif isinstance(z, torch.Tensor):
return z * 2
else:
raise RuntimeError("Unexpected return type")
xx = x.clone()
# trace fn into a GraphModule
traced_fn = make_fx(fn)(xx)
traced_fn.graph.lint()
traced_fn.graph.eliminate_dead_code()
# make sure the mul op indeed waits for comm
for node in traced_fn.graph.nodes:
if node.op == "call_function" and "mul.Tensor" in node.target.__name__:
prev = node.args[0]
curr = None
waited = False
commed = False
while prev is not None and not commed:
curr = prev
waited |= all([
curr.op == "call_function",
curr.target == _wait_comm,
])
commed |= all([
curr.op == "call_function",
CommTensor._is_supported(curr.target.__name__),
])
prev = curr.args[0]
self.assertTrue(waited)
self.assertTrue(commed)
# Update input to make sure we are not recording it as constant during
# tracing.
x += 1
xx += 1
y = fn(x)
yy = traced_fn(xx)
# check correctness
self.assertEqual(y, yy)
xx += 1
yy = traced_fn(xx)
self.assertNotEqual(y, yy)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class CompilerTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
testWithoutEnv
|
def testWithoutEnv(self):
with self.assertRaisesRegex(RuntimeError, "LOCAL_RANK"):
dist.get_node_local_rank()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class LocalRankTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
testWithoutEnvWithFallback
|
def testWithoutEnvWithFallback(self):
self.assertEqual(dist.get_node_local_rank(fallback_rank=2), 2)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class LocalRankTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
testNodeLocalRankOverridesFallback
|
def testNodeLocalRankOverridesFallback(self):
os.environ["LOCAL_RANK"] = str(self.rank)
self.assertEqual(dist.get_node_local_rank(fallback_rank=123), self.rank)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class LocalRankTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
testNodeLocalRank
|
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
def testNodeLocalRank(self):
os.environ["LOCAL_RANK"] = str(self.rank)
self.assertEqual(dist.get_node_local_rank(), self.rank)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class LocalRankTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_functional_native.py
|
load_test_module
|
def load_test_module(name):
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
testdir = Path(__file__).absolute().parent.parent
with mock.patch("sys.path", [*sys.path, str(testdir)]):
return SourceFileLoader(
name, str(testdir / f"{name.replace('.', '/')}.py")
).load_module()
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
@requires_nccl()
class TestWithNCCL(MultiProcessTestCase):
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
@property
def world_size(self) -> int:
return 2
@property
def ranks(self) -> List[int]:
return list(range(self.world_size))
@property
def device(self) -> torch.device:
return torch.device(f"cuda:{self.rank}")
def _init_process_group(self) -> None:
# Allow testing aoti after torch.compile
torch._inductor.config.triton.store_cubin = True
torch._inductor.config.debug = True
torch.cuda.set_device(self.device)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
torch._C._distributed_c10d._register_process_group("default", dist.group.WORLD)
@skip_if_lt_x_gpu(2)
def test_all_reduce_single(self) -> None:
self._init_process_group()
input = torch.full((10, 10), float(self.rank), device=self.device)
output = torch.ops._c10d_functional.all_reduce(
input,
"avg",
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
assert id(output) != id(input)
expect = sum(self.ranks) / self.world_size
assert output.eq(expect).all()
# Test Python API and AsyncCollectiveTensor
output = all_reduce(
input,
"avg",
"default",
)
assert isinstance(output, AsyncCollectiveTensor)
assert not output.completed
assert output.eq(expect).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_all_reduce_single_(self) -> None:
self._init_process_group()
input = torch.full((10, 10), float(self.rank), device=self.device)
output = torch.ops._c10d_functional.all_reduce_(
input,
"avg",
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
assert id(output) == id(input)
expect = sum(self.ranks) / self.world_size
assert output.eq(expect).all()
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced(self) -> None:
self._init_process_group()
inputs = [
torch.full((i, i), float(self.rank * i), device=self.device)
for i in range(10)
]
outputs = torch.ops._c10d_functional.all_reduce_coalesced(
inputs,
"avg",
"default",
)
for i, (output, input) in enumerate(zip(outputs, inputs)):
output = torch.ops._c10d_functional.wait_tensor(output)
assert id(output) != id(input)
assert output.eq(sum(self.ranks) / self.world_size * i).all()
# Test Python API and AsyncCollectiveTensor
outputs = all_reduce_coalesced(
inputs,
"avg",
"default",
)
for i, (output, input) in enumerate(zip(outputs, inputs)):
assert not output.completed
assert output.eq(sum(self.ranks) / self.world_size * i).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_all_reduce_coalesced_(self) -> None:
self._init_process_group()
inputs = [
torch.full((i, i), float(self.rank * i), device=self.device)
for i in range(10)
]
outputs = torch.ops._c10d_functional.all_reduce_coalesced_(
inputs,
"avg",
"default",
)
for i, (output, input) in enumerate(zip(outputs, inputs)):
output = torch.ops._c10d_functional.wait_tensor(output)
assert id(output) == id(input)
assert output.eq(sum(self.ranks) / self.world_size * i).all()
@skip_if_lt_x_gpu(2)
def test_all_gather_into_tensor_single(self) -> None:
self._init_process_group()
input = torch.full((10, 10), float(self.rank), device=self.device)
output = torch.ops._c10d_functional.all_gather_into_tensor(
input,
self.world_size,
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
expect = torch.cat(
[
torch.full((10, 10), float(rank), device=self.device)
for rank in self.ranks
]
)
assert torch.allclose(output, expect)
assert output.eq(expect).all()
# Test out-variant of all_gather_into_tensor
output = torch.empty(expect.shape, device=self.device)
output = torch.ops._c10d_functional.all_gather_into_tensor_out(
input,
self.world_size,
"default",
out=output,
)
output = torch.ops._c10d_functional.wait_tensor(output)
assert torch.allclose(output, expect)
assert output.eq(expect).all()
# Test Python API and AsyncCollectiveTensor
output = all_gather_tensor(
input,
0,
"default",
)
assert isinstance(output, AsyncCollectiveTensor)
assert not output.completed
assert output.eq(expect).all()
assert output.completed
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
# https://github.com/pytorch/pytorch/issues/126338
def test_inductor_dtypeview_memory_leak(self):
self._init_process_group()
def func(arg: torch.Tensor) -> torch.Tensor:
ag0 = torch.ops._c10d_functional.all_gather_into_tensor.default(
arg,
self.world_size,
"default",
)
ag0_view = torch.ops.aten.view.dtype(ag0, torch.int32)
return funcol.wait_tensor(ag0_view)
arg = torch.full(
(10, 10),
float(self.rank),
device=self.device,
dtype=torch.float32,
)
compiled = torch.compile(func)
mem_usage = {}
# check if the aten.view.dtype is compiled to aten.view.dtype
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check("torch.ops._c10d_functional.wait_tensor.default(aten.view.dtype")
.run(code)
)
# check memory leak
for i in range(1, 10):
mem_usage[i] = torch.cuda.max_memory_allocated()
compiled(arg)
assert mem_usage[9] == mem_usage[8]
@skip_if_lt_x_gpu(2)
def test_all_gather_into_tensor_coalesced(self) -> None:
self._init_process_group()
inputs = [
torch.full((10, 10), float(self.rank * i), device=self.device)
for i in range(10)
]
outputs = torch.ops._c10d_functional.all_gather_into_tensor_coalesced(
inputs,
self.world_size,
"default",
)
expect = [
torch.cat(
[
torch.full((10, 10), float(rank) * i, device=self.device)
for rank in self.ranks
]
)
for i in range(10)
]
for i, output in enumerate(outputs):
output = torch.ops._c10d_functional.wait_tensor(output)
assert output.eq(expect[i]).all()
# Test Python API and AsyncCollectiveTensor
outputs = all_gather_into_tensor_coalesced(
inputs,
"default",
)
for i, output in enumerate(outputs):
assert not output.completed
assert output.eq(expect[i]).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_reduce_scatter_tensor_single(self) -> None:
self._init_process_group()
input = torch.tensor(self.ranks, device=self.device)
output = torch.ops._c10d_functional.reduce_scatter_tensor(
input,
"avg",
self.world_size,
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
assert output.eq(self.rank).all()
# Test Python API and AsyncCollectiveTensor
output = reduce_scatter_tensor(
input,
"avg",
0,
"default",
)
assert isinstance(output, AsyncCollectiveTensor)
assert not output.completed
assert output.eq(self.rank).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_reduce_scatter_tensor_coalesced(self) -> None:
self._init_process_group()
inputs = [torch.tensor(self.ranks, device=self.device) * i for i in range(10)]
outputs = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced(
inputs,
"avg",
self.world_size,
"default",
)
for i, output in enumerate(outputs):
output = torch.ops._c10d_functional.wait_tensor(output)
assert output.eq(self.rank * i).all()
# Test Python API and AsyncCollectiveTensor
outputs = reduce_scatter_tensor_coalesced(
inputs,
"avg",
[0] * 10,
"default",
)
for i, output in enumerate(outputs):
assert not output.completed
assert output.eq(self.rank * i).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_all_to_all_single(self) -> None:
self._init_process_group()
torch.cuda.set_device(self.device)
torch.manual_seed(42)
send_sz_matrix = torch.randint(0, 20, (self.world_size, self.world_size))
input_split_sizes = send_sz_matrix[self.rank].tolist()
output_split_sizes = send_sz_matrix[:, self.rank].tolist()
input = torch.full((sum(input_split_sizes),), float(self.rank)).cuda()
output = torch.ops._c10d_functional.all_to_all_single(
input,
output_split_sizes,
input_split_sizes,
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
expect = torch.cat(
[
torch.full((sz,), float(rank)).cuda()
for rank, sz in enumerate(output_split_sizes)
]
)
assert output.eq(expect).all()
# Test Python API and AsyncCollectiveTensor
output = all_to_all_single(
input, output_split_sizes, input_split_sizes, "default"
)
assert not output.completed
assert output.eq(expect).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_broadcast(self) -> None:
self._init_process_group()
input = torch.full((10, 10), float(self.rank), device=self.device)
output = torch.ops._c10d_functional.broadcast(
input,
1,
"default",
)
output = torch.ops._c10d_functional.wait_tensor(output)
assert id(output) != id(input)
expect = 1
assert output.eq(expect).all()
# Test Python API and AsyncCollectiveTensor
output = funcol.broadcast(
input,
1,
"default",
)
assert isinstance(output, AsyncCollectiveTensor)
assert not output.completed
assert output.eq(expect).all()
assert output.completed
@skip_if_lt_x_gpu(2)
def test_unwaited(self) -> None:
# Verify that the process can terminate gracefully
# even with unwaited tensors
self._init_process_group()
input = torch.full((10, 10), float(self.rank), device=self.device)
output = torch.ops._c10d_functional.all_reduce(
input,
"avg",
"default",
)
@skip_if_lt_x_gpu(2)
def test_py_work(self) -> None:
self._init_process_group()
wait_called = False
class MyWork(dist.Work):
def wait(self, _):
nonlocal wait_called
wait_called = True
tensor = torch.rand(2, 2)
torch._C._distributed_c10d._register_work(tensor, MyWork())
torch.ops._c10d_functional.wait_tensor(tensor)
self.assertTrue(wait_called)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@fresh_inductor_cache()
def test_threading(self):
self._init_process_group()
device = torch.device(f"cuda:{self.rank}")
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
return ar0 + 1
arg = torch.rand(4, 4, device=device)
func(arg)
compiled = torch.compile(func, fullgraph=True)
code = run_and_get_triton_code(compiled, arg)
FileCheck().check("all_reduce_.default(buf0, 'avg', '0')").run(code)
# Unless explicitly specified (e.g. in a custom runtime), the process
# group registry is shared among all threads in a process. Here we
# verify that a process group registered in main thread can be resolved
# in a different thread.
class TestThread(threading.Thread):
def run(self):
self.exc = None
try:
func(arg)
compiled(arg)
except BaseException as exc:
self.exc = exc
def join(self):
threading.Thread.join(self)
if self.exc:
raise self.exc
t = TestThread()
t.start()
t.join()
class CompileTest(TestCase):
def setUp(self):
# Allow testing aoti after torch.compile
torch._inductor.config.triton.store_cubin = True
torch._inductor.config.debug = True
self.rank = 0
self.world_size = 2
torch.cuda.set_device("cuda:0")
store = FakeStore()
dist.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
def tearDown(self):
dist.destroy_process_group()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_all_reduce_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
# Expect in-place with inductor allocated buf
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
# Expect no in-place with graph input
ar1 = funcol.all_reduce(arg, "avg", "0")
ar1 = funcol.wait_tensor(ar1)
return ar0, ar1
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check("buf0 = empty")
.check("buf7 = empty")
# Expect in-place with inductor allocated buf
.check("torch.ops._c10d_functional.all_reduce_.default(buf0")
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
# Expect no in-place with graph input (buf5 is a clone)
.check("torch.ops._c10d_functional.all_reduce_.default(buf7")
.check("torch.ops._c10d_functional.wait_tensor.default(buf7")
# Expect no extra copy on return
.check("return (buf0, buf7, )")
.run(code)
)
assert "= torch.ops._c10d_functional.wait_tensor.default" not in code
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (arg,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_all_reduce_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
bufs = [arg + 42 for arg in args]
# Expect in-place with inductor allocated buf
ar0 = funcol.all_reduce_coalesced(bufs, "avg", "0")
ar0 = [funcol.wait_tensor(out) for out in ar0]
# Expect no in-place with graph input
ar1 = funcol.all_reduce_coalesced(args, "avg", "0")
ar1 = [funcol.wait_tensor(out) for out in ar1]
return ar0, ar1
args = [torch.rand(4, 4, device="cuda") for _ in range(2)]
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, args)
(
FileCheck()
.check("buf0 = empty")
.check("buf5 = empty")
.check("buf1 = empty")
.check("buf6 = empty")
# Expect in-place with inductor allocated buf
.check(
"torch.ops._c10d_functional.all_reduce_coalesced_"
".default([buf0, buf1]"
)
# Expect no in-place with graph input (buf5, buf6 are clones)
.check(
"torch.ops._c10d_functional.all_reduce_coalesced_"
".default([buf5, buf6]"
)
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
.check("torch.ops._c10d_functional.wait_tensor.default(buf1")
.check("torch.ops._c10d_functional.wait_tensor.default(buf5")
.check("torch.ops._c10d_functional.wait_tensor.default(buf6")
# Expect no extra copy on return
.check("return (buf0, buf1, buf5, buf6, )")
.run(code)
)
assert "= torch.ops._c10d_functional.wait_tensor.default" not in code
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (args,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_inplace_op_on_view(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = (arg + 10)[:2]
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
return ar0
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check("buf0 = empty")
# Ensure the all_reduce_ input is a view
.check(
"torch.ops._c10d_functional.all_reduce_.default(reinterpret_tensor(buf0"
)
.check(
"torch.ops._c10d_functional.wait_tensor.default(reinterpret_tensor(buf0"
)
.check("return (reinterpret_tensor(buf0")
.run(code)
)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_reuse_buffer_after_inplace_collective(self):
def func(arg: torch.Tensor) -> torch.Tensor:
# Expect allocation
buf0 = arg + 42
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
# Expect allocation
buf1 = torch.mm(arg, ar0)
# Expect buf0 to be reused
buf2 = torch.mm(arg, buf1)
return buf1, buf2
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
# Expect allocation
.check("buf0 = empty")
.check("torch.ops._c10d_functional.all_reduce_.default(buf0")
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
# Expect allocation
.check("buf7 = empty")
.check("extern_kernels.mm(arg0_1, buf0, out=buf7")
# Expect buf0 to be reused
.check("buf8 = buf0; del buf0 # reuse")
.check("extern_kernels.mm(arg0_1, buf7, out=buf8")
# Expect no extra copy on return
.check("return (buf7, buf8, )")
.run(code)
)
assert "= torch.ops._c10d_functional.wait_tensor.default" not in code
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_all_gather_into_tensor_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
ag0 = funcol.all_gather_tensor(arg, 0, "0")
ag0 = funcol.wait_tensor(ag0)
return ag0
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check(
"buf0 = torch.ops._c10d_functional.all_gather_into_tensor.default(arg0_1"
)
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
# Expect no extra copy on return
.check("return (buf0, )")
.run(code)
)
assert "= torch.ops._c10d_functional.wait_tensor.default" not in code
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (arg,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_all_gather_into_tensor_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
ag0 = funcol.all_gather_into_tensor_coalesced(args, "0")
ag0 = [funcol.wait_tensor(out) for out in ag0]
return ag0
args = [torch.rand(4, 4, device="cuda") for _ in range(4)]
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, args)
(
FileCheck()
.check(
"buf0 = torch.ops._c10d_functional.all_gather_into_tensor_coalesced"
".default([arg0_1, arg1_1, arg2_1, arg3_1]"
)
.check("buf1 = buf0[0]")
.check("buf2 = buf0[1]")
.check("buf3 = buf0[2]")
.check("buf4 = buf0[3]")
.check("torch.ops._c10d_functional.wait_tensor.default(buf1")
.check("torch.ops._c10d_functional.wait_tensor.default(buf2")
.check("torch.ops._c10d_functional.wait_tensor.default(buf3")
.check("torch.ops._c10d_functional.wait_tensor.default(buf4")
# Expect no extra copy on return
.check("return (buf1, buf2, buf3, buf4, )")
.run(code)
)
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (args,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_reduce_scatter_tensor_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
rs0 = funcol.reduce_scatter_tensor(arg, "avg", 0, "0")
rs0 = funcol.wait_tensor(rs0)
return rs0
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check(
"buf0 = torch.ops._c10d_functional.reduce_scatter_tensor.default(arg0_1"
)
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
# Expect no extra copy on return
.check("return (buf0, )")
.run(code)
)
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (arg,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_reduce_scatter_tensor_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
rs0 = funcol.reduce_scatter_tensor_coalesced(
args, "avg", [0] * len(args), "0"
)
rs0 = [funcol.wait_tensor(out) for out in rs0]
return rs0
args = [torch.rand(4, 4, device="cuda") for _ in range(4)]
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, args)
(
FileCheck()
.check(
"buf0 = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced"
".default([arg0_1, arg1_1, arg2_1, arg3_1]"
)
.check("buf1 = buf0[0]")
.check("buf2 = buf0[1]")
.check("buf3 = buf0[2]")
.check("buf4 = buf0[3]")
.check("torch.ops._c10d_functional.wait_tensor.default(buf1")
.check("torch.ops._c10d_functional.wait_tensor.default(buf2")
.check("torch.ops._c10d_functional.wait_tensor.default(buf3")
.check("torch.ops._c10d_functional.wait_tensor.default(buf4")
# Expect no extra copy on return
.check("return (buf1, buf2, buf3, buf4, )")
.run(code)
)
# Test aoti
AOTIRunnerUtil.run("cuda", func, (args,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_all_to_all_single(self):
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
for elem in lst:
torch._check_is_size(elem)
return lst
def func(
input: torch.Tensor,
output_split_sizes: torch.Tensor,
input_split_sizes: torch.Tensor,
) -> torch.Tensor:
output = funcol.all_to_all_single(
input,
_tolist_with_constrain_as_size(output_split_sizes),
_tolist_with_constrain_as_size(input_split_sizes),
"0",
)
return funcol.wait_tensor(output)
torch.manual_seed(42)
send_sz_matrix = torch.randint(0, 20, (self.world_size, self.world_size))
input_split_sizes = send_sz_matrix[self.rank]
output_split_sizes = send_sz_matrix[:, self.rank].contiguous()
input = torch.full((input_split_sizes.sum().item(),), float(self.rank)).cuda()
with torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
):
compiled = torch.compile(func, dynamic=True)
code = run_and_get_triton_code(
compiled, input, output_split_sizes, input_split_sizes
)
(
FileCheck()
.check_regex(
"torch.ops._c10d_functional.all_to_all_single.default\\("
"arg\\d+_\\d+, \\[u\\d+, u\\d+\\], \\[u\\d+, u\\d+\\]"
)
.check("torch.ops._c10d_functional.wait_tensor.default(")
.run(code)
)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_inductor_broadcast(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
# Expect in-place with inductor allocated buf
br0 = funcol.broadcast(buf0, 1, "0")
br0 = funcol.wait_tensor(br0)
# Expect no in-place with graph input
br1 = funcol.broadcast(arg, 0, "0")
br1 = funcol.wait_tensor(br1)
return br0, br1
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, arg)
(
FileCheck()
.check("buf0 = empty")
.check("buf7 = empty")
# Expect in-place with inductor allocated buf
.check("torch.ops._c10d_functional.broadcast_.default(buf0")
.check("torch.ops._c10d_functional.wait_tensor.default(buf0")
# Expect no in-place with graph input (buf5 is a clone)
.check("torch.ops._c10d_functional.broadcast_.default(buf7")
.check("torch.ops._c10d_functional.wait_tensor.default(buf7")
# Expect no extra copy on return
.check("return (buf0, buf7, )")
.run(code)
)
# Test aoti
out = AOTIRunnerUtil.run("cuda", func, (arg,))
torch.cuda.synchronize()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_ranks_and_tag(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
# Expect in-place with inductor allocated buf
ar0 = funcol.all_reduce(buf0, "avg", [0, 1], "")
ar0 = funcol.wait_tensor(ar0)
# Expect no in-place with graph input
ar1 = funcol.all_reduce(arg, "avg", [0, 1], "")
ar1 = funcol.wait_tensor(ar1)
return ar0, ar1
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func, fullgraph=True)
code = run_and_get_triton_code(compiled, arg)
(FileCheck().check("all_reduce_.default(buf0, 'avg', '0')").run(code))
if __name__ == "__main__":
run_tests()
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
import sys
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_functional_native.py
|
wait
|
def wait(self, _):
nonlocal wait_called
wait_called = True
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
class MyWork(dist.Work):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
test_init_process_group_optional_backend
|
def test_init_process_group_optional_backend(self):
with tempfile.NamedTemporaryFile() as f:
store = dist.FileStore(f.name, self.world_size)
# creates both gloo and nccl backend
if dist.is_gloo_available() and dist.is_nccl_available():
dist.init_process_group(
store=store,
rank=self.rank,
world_size=self.world_size,
)
dist.destroy_process_group()
|
def test_init_process_group_optional_backend(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
store = dist.FileStore(f.name, self.world_size)
# creates both gloo and nccl backend
if dist.is_gloo_available() and dist.is_nccl_available():
dist.init_process_group(
store=store,
rank=self.rank,
world_size=self.world_size,
)
dist.destroy_process_group()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
test_init_process_group_for_all_backends
|
def test_init_process_group_for_all_backends(self):
for backend in dist.Backend.backend_list:
# skip if the backend is not available on the system
if backend == dist.Backend.UNDEFINED:
continue
elif backend == dist.Backend.MPI:
if not dist.is_mpi_available():
continue
elif backend == dist.Backend.NCCL:
if not dist.is_nccl_available():
continue
elif backend == dist.Backend.GLOO:
if not dist.is_gloo_available():
continue
elif backend == dist.Backend.UCC:
if not dist.is_ucc_available():
continue
with tempfile.NamedTemporaryFile() as f:
store = dist.FileStore(f.name, self.world_size)
dist.init_process_group(
backend=backend,
rank=self.rank,
world_size=self.world_size,
store=store
)
dist.destroy_process_group()
|
def test_init_process_group_for_all_backends(self):
for backend in dist.Backend.backend_list:
# skip if the backend is not available on the system
if backend == dist.Backend.UNDEFINED:
continue
elif backend == dist.Backend.MPI:
if not dist.is_mpi_available():
continue
elif backend == dist.Backend.NCCL:
if not dist.is_nccl_available() or not torch.cuda.is_available():
continue
elif backend == dist.Backend.GLOO:
if not dist.is_gloo_available():
continue
elif backend == dist.Backend.UCC:
if not dist.is_ucc_available():
continue
with tempfile.NamedTemporaryFile(delete=False) as f:
store = dist.FileStore(f.name, self.world_size)
dist.init_process_group(
backend=backend,
rank=self.rank,
world_size=self.world_size,
store=store,
)
pg = c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
self.assertEqual(pg.size(), self.world_size)
self.assertEqual(pg.name(), str(backend))
dist.destroy_process_group()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class ProcessGroupWithDispatchedCollectivesTests(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
test_allreduce_basics_cuda_using_work_api
|
def test_allreduce_basics_cuda_using_work_api(self):
self._test_allreduce_basics_using_work_api(lambda t: t.clone().cuda())
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ProcessGroupGlooTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
test_round_robin_create_destroy
|
def test_round_robin_create_destroy(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
def create(num, prefix):
return c10d._round_robin_process_groups(
[
c10d.new_group(pg_options=self.opts())
for i in range(num)
]
)
# Run create/use/destroy twice
for i in range(2):
num_process_groups = 2
pg = create(num=num_process_groups, prefix=i)
for _ in range(3):
tensor = torch.ones([10, 10])
pg.allreduce(tensor).wait()
self.assertEqual(torch.full([10, 10], float(self.world_size)), tensor)
del pg
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ProcessGroupGlooTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
test_consecutive_comm_work_wait_gpu
|
def test_consecutive_comm_work_wait_gpu(self):
self._test_consecutive_comm_work_wait(
torch.ones(2, 2, device=self.rank) * self.rank
)
|
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/distributed/test_c10d_logger.py
|
process_group
|
return dist.group.WORLD
|
def process_group(self):
return dist.group.WORLD
|
import json
import logging
import os
import re
import sys
import time
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger, _time_logger
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
class C10dErrorLoggerTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_logger.py
|
destroy_comms
|
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
|
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
|
import json
import logging
import os
import re
import sys
import time
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger, _time_logger
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
class C10dErrorLoggerTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_logger.py
|
dist_init
|
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
|
def dist_init(self):
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
|
import json
import logging
import os
import re
import sys
import time
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger, _time_logger
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
class C10dErrorLoggerTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_gloo.py
|
test_ignored_sharded_tensor
|
def test_ignored_sharded_tensor(self):
class MyModule(nn.Module):
def __init__(self, shard_tensor: ShardedTensor) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.st = nn.Parameter(shard_tensor)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
return F.softmax(x, dim=1)
pg = dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
device = torch.device(f"cuda:{self.rank}")
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank % 2) * 5, 0],
shard_sizes=[5, 10],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [Shard(torch.randn(5, 10, device=device), local_shard_metadata)]
st = init_from_local_shards(local_shards, [10, 10])
m = MyModule(st)
with _ddp_replicated_tensor(False):
DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
module=m,
params_and_buffers_to_ignore={'st'}
)
# test to make DDP constructor will not fail when module includes a ShardedTensor when ignored
DistributedDataParallel(
m,
device_ids=[device] if device.type == "gpu" else None,
process_group=pg,
gradient_as_bucket_view=True,
broadcast_buffers=False,
static_graph=True,
)
|
def test_ignored_sharded_tensor(self):
class MyModule(nn.Module):
def __init__(self, shard_tensor: ShardedTensor) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.st = nn.Parameter(shard_tensor)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
return F.softmax(x, dim=1)
pg = dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
device = torch.device(f"cuda:{self.rank}")
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank % 2) * 5, 0],
shard_sizes=[5, 10],
placement=f"rank:{self.rank}/cuda:{self.rank}",
)
local_shards = [Shard(torch.randn(5, 10, device=device), local_shard_metadata)]
st = init_from_local_shards(local_shards, [10, 10])
m = MyModule(st)
DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
module=m, params_and_buffers_to_ignore={"st"}
)
# test to make DDP constructor will not fail when module includes a ShardedTensor when ignored
DistributedDataParallel(
m,
device_ids=[device] if device.type == "gpu" else None,
process_group=pg,
gradient_as_bucket_view=True,
broadcast_buffers=False,
static_graph=True,
)
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend(
[torch.device("cpu")], None, gradient_as_bucket_view=True
)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
By simulating a multi-task training, this test is to make sure:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend(
[torch.device("cpu")], None, gradient_as_bucket_view=True
)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
By simulating a multi-task training, this test is to make sure:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
test_save_load_checkpoint
|
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
# ensure that all the three models start with the same set of parameters. By default they are randomized on construction
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(
model_withload.parameters(), lr=0.001
)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# run the model for 6 iterations, with a checkpoint in the middle
train_loop(ddp_withload, optimizer_withload, 3)
# zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
# the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
ddp_state_dict, "module."
)
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
# re-run the model with the same inputs for 6 iterations with no checkpoint
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(),
ddp_withoutload.parameters(),
model_withload.parameters(),
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
|
def test_save_load_checkpoint(self):
dist.init_process_group(
"gloo",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
class TestModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
def train_loop(model, optimizer, iterations):
for _ in range(iterations):
optimizer.zero_grad()
output = model(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model_withload = TestModel().float().to(device_id)
model_withoutload = TestModel().float().to(device_id)
ddp_withload = DistributedDataParallel(
model_withload,
device_ids=[device_id],
)
ddp_withoutload = DistributedDataParallel(
model_withoutload,
device_ids=[device_id],
)
# ensure that all the three models start with the same set of parameters. By default they are randomized on construction
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
for p in model_withload.parameters():
with torch.no_grad():
p.zero_()
for p in ddp_withoutload.parameters():
with torch.no_grad():
p.zero_()
batch_size = 4
criterion = nn.CrossEntropyLoss()
optimizer_withload = torch.optim.SGD(ddp_withload.parameters(), lr=0.001)
optimizer_non_ddp_withload = torch.optim.SGD(
model_withload.parameters(), lr=0.001
)
optimizer_withoutload = torch.optim.SGD(ddp_withoutload.parameters(), lr=0.001)
input = torch.rand([batch_size, 2], dtype=torch.float).to(device_id)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
device_id
)
# run the model for 6 iterations, with a checkpoint in the middle
train_loop(ddp_withload, optimizer_withload, 3)
# zero out parameters of both DDP and non-DDP models and reload them from the DDP state dict
checkpoint_path = tempfile.gettempdir() + "/model.checkpoint"
if self.rank == 0:
torch.save(ddp_withload.state_dict(), checkpoint_path)
dist.barrier()
map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
ddp_state_dict = torch.load(checkpoint_path, map_location=map_location)
for model in [ddp_withload, model_withload]:
for p in ddp_withload.parameters():
with torch.no_grad():
p.zero_()
ddp_withload.load_state_dict(ddp_state_dict)
# the non-DDP model needs to first remove the prefix of "module." from the DDP state dict
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
ddp_state_dict, "module."
)
model_withload.load_state_dict(ddp_state_dict)
train_loop(ddp_withload, optimizer_withload, 3)
train_loop(model_withload, optimizer_non_ddp_withload, 3)
# re-run the model with the same inputs for 6 iterations with no checkpoint
train_loop(ddp_withoutload, optimizer_withoutload, 6)
for p_withload, p_withoutload, p_non_ddp_withload in zip(
ddp_withload.parameters(),
ddp_withoutload.parameters(),
model_withload.parameters(),
):
self.assertEqual(p_withload, p_withoutload)
self.assertEqual(p_non_ddp_withload, p_withoutload)
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(backend="gloo", store=store, rank=self.rank, world_size=self.world_size)
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend(
[torch.device("cpu")], None, gradient_as_bucket_view=True
)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
By simulating a multi-task training, this test is to make sure:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
return c10d.distributed_c10d._get_default_group()
def _test_gloo_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
process_group = c10d.distributed_c10d._get_default_group()
device = devices[-1]
backend = process_group._get_backend(device)
backend.create_device(interface=LOOPBACK)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_gloo()
def test_gloo_backend_cpu_module(self):
self._test_gloo_backend([torch.device("cpu")], None)
@requires_gloo()
def test_gloo_backend_cpu_module_grad_is_view(self):
self._test_gloo_backend(
[torch.device("cpu")], None, gradient_as_bucket_view=True
)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, int_devices)
@requires_gloo()
@skip_if_lt_x_gpu(2)
def test_gloo_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, devices)
@requires_gloo()
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
@requires_gloo()
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_gloo_backend(devices, None, multi_device=True)
def _test_global_local_unused_params_grad(
self, gradient_as_bucket_view=False, static_graph=False
):
"""
By simulating a multi-task training, this test is to make sure:
1) DDP does not touch the grad of globally unused parameters.
2) DDP does update the grad of locally unused parameters.
"""
class GlobalLocalUnusedParamModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
__init__
|
def __init__(self):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.task_unused = Task()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class ReducerModule(nn.Module):
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from datetime import timedelta
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
TestCase,
)
class ReducerModule(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_common_errors
|
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env:
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
|
def test_common_errors(self):
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(common.find_free_port()),
}
class Env:
def __init__(self, vars):
self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
def __enter__(self):
self.env_patcher.start()
def __exit__(self, type, value, traceback):
self.env_patcher.stop()
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?world_size={1}")
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous(f"env://?rank={0}")
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?rank={0}&world_size={1}")
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class RendezvousEnvTest(TestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class RendezvousEnvTest(TestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_gloo.py
|
world_size
|
def world_size(self):
return 2
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
_get_default_group
|
def _get_default_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo",
rank=self.rank,
world_size=self.world_size,
store=store,
)
return dist.distributed_c10d._get_default_group()
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
test_allreduce_work_wait_gpu
|
def test_allreduce_work_wait_gpu(self):
self._test_allreduce_work_wait(
torch.ones(2, 2, device=self.rank) * self.rank
)
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
test_allgather_work_wait_gpu
|
def test_allgather_work_wait_gpu(self):
self._test_allgather_work_wait(
torch.ones(2, 2, device=self.rank) * self.rank
)
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_gloo.py
|
test_broadcast_work_wait_gpu
|
def test_broadcast_work_wait_gpu(self):
self._test_broadcast_work_wait(
torch.ones(2, 2, device=self.rank) * self.rank
)
|
import copy
import logging
import math
import operator
import os
import random
import sys
import tempfile
from functools import reduce
from itertools import groupby
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import (
gpus_for_rank,
LOOPBACK,
ModuleForDdpCommHook,
SparseGradientModule,
Task,
)
from torch import nn
from torch.distributed._shard.sharded_tensor import (
init_from_local_shards,
Shard,
ShardedTensor,
ShardMetadata,
)
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
from torch.testing._internal.common_distributed import (
create_device,
MultiProcessTestCase,
requires_gloo,
simple_sparse_reduce_tests,
skip_if_lt_x_gpu,
skip_if_win32,
verify_ddp_error_logged,
)
from torch.testing._internal.common_utils import (
retry_on_connect_failures,
run_tests,
sandcastle_skip,
TestCase,
)
class CompilerTest(test_c10d_common.CompilerTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
allreduce
|
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.SUM)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1) // 2]),
tensors[0],
)
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([self.rank + 1.]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
tensors[0],
)
# Premul Sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for dtype in torch.half, torch.float, torch.double:
for factor in (3.0, torch.tensor([5.0], device=local_device_id, dtype=dtype)):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id).to(dtype=dtype)]
allreduce(tensors, c10d._make_nccl_premul_sum(factor))
self.assertEqual(
factor * torch.tensor([self.world_size * (self.world_size + 1) / 2],
dtype=dtype, device=local_device_id),
tensors[0],
)
# Product
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
self.assertEqual(
torch.tensor([math.factorial(self.world_size)]), tensors[0]
)
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MIN)
self.assertEqual(torch.tensor([1]), tensors[0])
# Max
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
for op, err in zip((c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR")):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + err + " with NCCL"
):
allreduce(tensors, op)
|
self._create_process_group_nccl(store, self.opts())
device = self.rank_to_GPU[self.rank][0]
t = torch.rand(10, 10, device=device)
# First allreduce to initialize state.
dist.all_reduce(t)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/distributed/test_c10d_nccl.py
|
withouts
|
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?world_size={}".format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous("env://?rank={}".format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
|
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
with self.assertRaisesRegex(ValueError, "RANK expected"):
gen = c10d.rendezvous("env://")
next(gen)
c10d.init_process_group(backend="nccl", rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
c10d.init_process_group(backend="nccl", rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend="nccl")
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, "MASTER_ADDR")):
self.assertEqual(None, os.environ.get("MASTER_ADDR"))
with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "MASTER_PORT")):
self.assertEqual(None, os.environ.get("MASTER_PORT"))
with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
gen = c10d.rendezvous("env://")
next(gen)
with Env(without(vars, "WORLD_SIZE")):
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?world_size={1}")
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, "RANK")):
self.assertEqual(None, os.environ.get("RANK"))
gen = c10d.rendezvous(f"env://?rank={0}")
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
self.assertEqual(None, os.environ.get("RANK"))
self.assertEqual(None, os.environ.get("WORLD_SIZE"))
gen = c10d.rendezvous(f"env://?rank={0}&world_size={1}")
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_init_no_gpus
|
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
|
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
ValueError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class ProcessGroupNCCLNoGPUTest(TestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLNoGPUTest(TestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
_create_process_group_nccl
|
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
def _create_process_group_nccl(self, store, opts, device_id=None):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts,
device_id=device_id,
)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class ProcessGroupNCCLTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLGroupTest(MultiProcessTestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
_create_process_group_nccl
|
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
def _create_process_group_nccl(self, store, opts, device_id=None):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts,
device_id=device_id,
)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class ProcessGroupNCCLTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLGroupTest(MultiProcessTestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_file_store_check
|
def test_file_store_check(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["TORCH_NCCL_ENABLE_MONITORING"] = "0"
# FileStore check() would be executed
os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "0"
# self.file_name is created using "delete=False"
# e.g., self.file_name = tempfile.NamedTemporaryFile(delete=False).name
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
pg = dist.distributed_c10d._get_default_group()
self.assertEqual(pg.rank(), self.rank)
self.assertEqual(pg.size(), self.world_size)
# give enough time for check() to be executed multiple times
time.sleep(2)
dist.destroy_process_group()
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLGroupTest(MultiProcessTestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
reduce_scatter_base
|
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the output tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
|
opts = dict(
backend=backend,
store=store,
rank=self.rank,
world_size=self.world_size,
timeout=timedelta(seconds=123),
)
dist.init_process_group(**opts)
pg = dist.distributed_c10d._get_default_group()
bankend = pg._get_backend(torch.device(f"cuda:{self.rank}"))
w = pg.allreduce(torch.rand(10).cuda(self.rank))
self.assertTrue(bankend._verify_work_timeout(w, timedelta(seconds=123)))
w.wait()
bankend._set_default_timeout(timedelta(seconds=3))
if self.rank == 0:
# Ideally we want to sleep for a very long time, but this is not
# feasible in unit test. So this is only a very tiny case.
time.sleep(5)
pg.allreduce(torch.rand(10).cuda(self.rank))
time.sleep(5)
pg.allreduce(torch.rand(5).cuda(self.rank))
w = pg.allreduce(torch.rand(10).cuda(self.rank))
self.assertTrue(bankend._verify_work_timeout(w, timedelta(seconds=3)))
w.wait()
else:
dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(
timedelta(seconds=10)
)
w1 = pg.allreduce(torch.rand(10).cuda(self.rank))
w2 = pg.allreduce(torch.rand(5).cuda(self.rank))
self.assertTrue(bankend._verify_work_timeout(w1, timedelta(seconds=13)))
self.assertTrue(bankend._verify_work_timeout(w2, timedelta(seconds=13)))
w1.wait()
dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(
timedelta(seconds=5)
)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/distributed/test_c10d_nccl.py
|
perm
|
def perm(n, k):
prod_val = n
for val in range(n - k + 1, n):
prod_val *= val
return prod_val
for i in range(num_gpus):
prod_val = perm(self.rank + self.world_size, self.world_size)
expected = torch.tensor([prod_val])
self.assertEqual(expected, output[i])
# Test the input params overridden scenarios, aka, when the input is
# a list and output is just one tensor.
# Sum
output_tensor = torch.empty_like(input_per_gpu[0][0]).cuda(self.rank)
input_list = [tensor[0].cuda(self.rank) for tensor in input_per_gpu]
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.SUM).wait()
expected = torch.tensor(
(1 + self.world_size) * self.world_size // 2 + self.world_size * self.rank
)
self.assertEqual(expected, output_tensor)
# Min
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MIN).wait()
expected = torch.tensor(self.rank + 1)
self.assertEqual(expected, output_tensor)
# Max
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MAX).wait()
expected = torch.tensor(self.rank + self.world_size)
self.assertEqual(expected, output_tensor)
# Product
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.PRODUCT).wait()
prod_val = self.rank + 1
for k in range(1, self.world_size):
prod_val = prod_val * (self.rank + 1 + k)
expected = torch.tensor(prod_val)
self.assertEqual(expected, output_tensor)
if torch.cuda.nccl.version() >= (2, 11, 1):
for factor in (3.0, torch.tensor([5.0], device=self.rank)):
if isinstance(factor, torch.Tensor):
factor_ref = factor.cpu().item()
else:
factor_ref = factor
output = [t.float() for t in output]
tensor_lists = [[t.float() for t in tl] for tl in tensor_lists]
output_ref = [t.float() for t in output]
tensor_lists_ref = [[t.float() * factor_ref for t in tl] for tl in tensor_lists]
reduce_scatter(output, tensor_lists, c10d._make_nccl_premul_sum(factor))
reduce_scatter(output_ref, tensor_lists_ref, c10d.ReduceOp.SUM)
self.assertEqual(output_ref, output)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
reduce_scatter_base
|
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
RuntimeError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
RuntimeError, "input tensor must be the same type as the output tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_no_grad
|
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
process_group = self._get_process_group()
class NoGradModule(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
|
def test_no_grad(self):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
process_group = self._get_process_group()
class NoGradModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has their gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(isinstance(output, torch.Tensor))
# No parameter should have their gradient set.
check_no_grads()
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group("nccl", store=store, rank=self.rank, world_size=self.world_size)
return c10d.distributed_c10d._get_default_group()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
process_group = self._get_process_group()
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_propagate_error_reason(self):
# Need to use NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise process will be taken down and we can't check for errors.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
# TODO: smaller timeout can fail since PG NCCl does health check in
# constructor. Look into reducing this test's runtime.
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
if self.rank == 0:
pg_gloo.barrier().wait()
inp = torch.ones(1).cuda(self.rank)
if self.rank != 0:
# Time out due to rank 0 not calling into allreduce.
with self.assertRaises(RuntimeError):
pg.allreduce([inp]).wait(timedelta(seconds=5))
# Now when nonzero rank attempts to use communicator, original failure reason should be logged.j
try:
pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
except RuntimeError as e:
self.assertTrue("timed out in call to wait()" in str(e))
self.assertTrue("TensorShape=[1]" in str(e))
else:
self.fail("Expected error to be raised!")
# Unblock rank 0
pg_gloo.barrier().wait()
# TODO: We can also test that if rank 0 attempts to use the communicator,
# then we should error out with the info that it was aborted due to
# timeout on another rank. Although this would only be the case after
# the watchdog has run on the rank, and there is no reliable way
# to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
process_group = self._get_process_group()
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
process_group = self._get_process_group()
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
process_group = self._get_process_group()
class ForwardReturnValueModule(nn.Module):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
# TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
# that use TORCH_NCCL_BLOCKING_WAIT will test it as expected.
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(
"nccl", store=store, rank=self.rank, world_size=self.world_size
)
return c10d.distributed_c10d._get_default_group()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
process_group = self._get_process_group()
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_propagate_error_reason(self):
# Need to use TORCH_NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise process will be taken down and we can't check for errors.
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
# Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out
os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
pg = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=15)
)
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
if self.rank == 0:
pg_gloo.barrier().wait()
inp = torch.ones(1).cuda(self.rank)
if self.rank != 0:
# Time out due to rank 0 not calling into allreduce.
with self.assertRaises(dist.DistBackendError):
pg.allreduce([inp]).wait(timedelta(seconds=5))
# Now when nonzero rank attempts to use communicator, original failure reason should be logged.
try:
pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
except dist.DistBackendError as e:
self.assertTrue("aborted" in str(e))
else:
self.fail("Expected error to be raised!")
# Unblock rank 0
pg_gloo.barrier().wait()
# TODO: We can also test that if rank 0 attempts to use the communicator,
# then we should error out with the info that it was aborted due to
# timeout on another rank. Although this would only be the case after
# the watchdog has run on the rank, and there is no reliable way
# to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
process_group = self._get_process_group()
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
process_group = self._get_process_group()
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2**15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
process_group = self._get_process_group()
class ForwardReturnValueModule(nn.Module):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_ddp_complex_params
|
def test_ddp_complex_params(self):
class FFTModel(nn.Module):
def __init__(self, hin, win, n_features):
super().__init__()
self.hin = hin
self.win = win
self.weight = nn.Parameter(
torch.ones(
(n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat
)
)
def forward(self, x):
xc = torch.fft.rfft2(
x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho"
)
xcw = torch.einsum("nchw,cohw->nohw", xc, self.weight)
x = torch.fft.irfft2(xcw, dim=(-2, -1), norm="ortho")
return x
process_group = self._get_process_group()
device_id = gpus_for_rank(self.world_size)[self.rank][0]
N, C, H, W = 1, 16, 64, 64
ddp_model = DistributedDataParallel(
FFTModel(hin=H, win=W, n_features=C).to(device_id),
device_ids=[device_id],
process_group=process_group,
)
optimizer = torch.optim.Adam(ddp_model.parameters(), lr=0.001)
inp = torch.ones((N, C, H, W), dtype=torch.float32)
# train step
out = ddp_model(inp)
loss = torch.sum(out)
loss.backward()
optimizer.step()
torch.cuda.synchronize(device=device_id)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class DistributedDataParallelTest(
test_c10d_common.CommonDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super().setUp()
# TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests
# that use TORCH_NCCL_BLOCKING_WAIT will test it as expected.
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _get_process_group(self):
store = self._get_store()
c10d.init_process_group(
"nccl", store=store, rank=self.rank, world_size=self.world_size
)
return c10d.distributed_c10d._get_default_group()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
process_group = self._get_process_group()
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_propagate_error_reason(self):
# Need to use TORCH_NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise process will be taken down and we can't check for errors.
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"
# Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out
os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
# provide sufficient timeout to initialize NCCL comm.
pg = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=15)
)
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
pg.barrier().wait(timedelta(seconds=5))
# Simulate stuckness in rank 0.
if self.rank == 0:
pg_gloo.barrier().wait()
inp = torch.ones(1).cuda(self.rank)
if self.rank != 0:
# Time out due to rank 0 not calling into allreduce.
with self.assertRaises(dist.DistBackendError):
pg.allreduce([inp]).wait(timedelta(seconds=5))
# Now when nonzero rank attempts to use communicator, original failure reason should be logged.
try:
pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
except dist.DistBackendError as e:
self.assertTrue("aborted" in str(e))
else:
self.fail("Expected error to be raised!")
# Unblock rank 0
pg_gloo.barrier().wait()
# TODO: We can also test that if rank 0 attempts to use the communicator,
# then we should error out with the info that it was aborted due to
# timeout on another rank. Although this would only be the case after
# the watchdog has run on the rank, and there is no reliable way
# to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_device_ids_None(self):
self._test_nccl_backend(None, None)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_single_device_module_empty_device_ids(self):
# This tests the backward compatibility of accepting an empty list as `device_ids`,
# although we no longer document this in favor of the default value of `None`,
# which is consistent with multi-device modules and CPU modules.
self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
process_group = self._get_process_group()
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(
ValueError,
"DistributedDataParallel device_ids and output_device arguments only work with "
"single-device/multiple-device GPU modules or CPU modules",
):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
with self.assertRaisesRegex(
ValueError, "input module must be on the same type of devices"
):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group
)
def _test_fp16(self, gradient_as_bucket_view=False):
process_group = self._get_process_group()
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
input = torch.tensor([[2**15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16(self):
self._test_fp16()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_fp16_grad_is_view(self):
self._test_fp16(gradient_as_bucket_view=True)
def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
"""
Note: this test can be sped up by only running it on a CPU module
once DistributedDataParallel supports them.
"""
process_group = self._get_process_group()
class ForwardReturnValueModule(nn.Module):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_get_store
|
def _get_store(self):
return dist.FileStore(self.file_name, self.world_size)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class WorkHookTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
first_bucket_size
|
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = (
"rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
self.rank, formats, dtypes, bucketsize
)
)
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = "iter = {} ".format(it) + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
|
def first_bucket_size(ddp_bucket_mb):
old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
try:
yield
finally:
dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
with torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
):
for formats, dtypes, bucketsize in product(
layer_formats, layer_dtypes, bucketsizes
):
with first_bucket_size(bucketsize):
model_msg = f"rank = {self.rank} formats = {formats} dtypes = {dtypes} bucketsize = {bucketsize} "
try:
m = ConvNet(layer_devs, formats, dtypes)
m_ddp = DistributedDataParallel(
copy.deepcopy(m),
device_ids=replica_devices,
process_group=process_group,
bucket_cap_mb=bucketsize,
)
opt = torch.optim.SGD(m.parameters(), lr=0.1)
opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
has_half = any(p.dtype is torch.half for p in m.parameters())
tol = 1.0e-3 if has_half else 1.0e-5
except BaseException:
# Prints case-specific debugging info to narrow down failing case.
print(
"Caught exception during model creation for " + model_msg,
flush=True,
)
raise
# 3 iters: First iter creates grads, second iter retests after rebucketing,
# third iter tries zeroed grads.
for it in range(3):
iter_msg = f"iter = {it} " + model_msg
named_msg = iter_msg
try:
F.mse_loss(m(input).float(), target).backward()
F.mse_loss(
m_ddp(input[local_batch_start:local_batch_end]).float(),
target[local_batch_start:local_batch_end],
).backward()
for i, ((layer_name, m_child), m_ddp_child) in enumerate(
zip(m.named_children(), m_ddp.module.children())
):
named_msg = layer_name + ".weight" + " " + iter_msg
self.assertTrue(
m_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
self.assertTrue(
m_ddp_child.weight.grad.is_contiguous(
memory_format=formats[i]
),
named_msg,
)
for j, ((param_name, p), p_ddp) in enumerate(
zip(
m_child.named_parameters(),
m_ddp_child.parameters(),
)
):
named_msg = (
layer_name + "." + param_name + " " + iter_msg
)
self.assertEqual(
p.grad, p_ddp.grad, rtol=tol, atol=tol
)
opt.step()
opt_ddp.step()
if it == 0:
for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
p.grad = None
p_ddp.grad = None
else:
m.zero_grad()
m_ddp.zero_grad()
except BaseException:
# Makes sure we still get info if an error occurred somewhere other than the asserts.
print(
"Caught exception during iterations at " + named_msg,
flush=True,
)
raise
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
hook
|
def hook(work_info: torch._C._distributed_c10d.WorkInfo):
nonlocal num_hook_fired, durations
num_hook_fired += 1
durations.append(work_info.active_duration.total_seconds())
pg._register_on_completion_hook(hook)
tensor = torch.ones([2, 3]).cuda(self.rank) * self.rank
pg.broadcast([tensor]).wait()
pg.broadcast([tensor]).wait()
# N.B.: destroy_process_group is necessary to wait for
# all pending works to finish.
c10d.destroy_process_group(pg)
self.assertEqual(num_hook_fired, 2)
self.assertEqual(len(durations), 2)
for duration in durations:
self.assertTrue(duration > 0)
self.assertEqual(tensor, torch.zeros([2, 3]).cuda(self.rank))
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
hook
|
def hook(work_info: torch._C._distributed_c10d.WorkInfo):
nonlocal num_hook_fired, durations
num_hook_fired += 1
durations.append(work_info.active_duration.total_seconds())
pg._register_on_completion_hook(hook)
tensor = torch.ones([2, 3]).cuda(self.rank) * self.rank
pg.broadcast([tensor]).wait()
pg.broadcast([tensor]).wait()
# N.B.: destroy_process_group is necessary to wait for
# all pending works to finish.
c10d.destroy_process_group(pg)
self.assertEqual(num_hook_fired, 2)
self.assertEqual(len(durations), 2)
for duration in durations:
self.assertTrue(duration > 0)
self.assertEqual(tensor, torch.zeros([2, 3]).cuda(self.rank))
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
hook
|
def hook(work_info: torch._C._distributed_c10d.WorkInfo):
nonlocal num_hook_fired, durations
num_hook_fired += 1
durations.append(work_info.active_duration.total_seconds())
pg._register_on_completion_hook(hook)
tensor = torch.ones([2, 3]).cuda(self.rank) * self.rank
pg.broadcast([tensor]).wait()
pg.broadcast([tensor]).wait()
# N.B.: destroy_process_group is necessary to wait for
# all pending works to finish.
c10d.destroy_process_group(pg)
self.assertEqual(num_hook_fired, 2)
self.assertEqual(len(durations), 2)
for duration in durations:
self.assertTrue(duration > 0)
self.assertEqual(tensor, torch.zeros([2, 3]).cuda(self.rank))
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
hook
|
def hook(work_info: torch._C._distributed_c10d.WorkInfo):
nonlocal num_hook_fired, durations
num_hook_fired += 1
durations.append(work_info.active_duration.total_seconds())
pg._register_on_completion_hook(hook)
tensor = torch.ones([2, 3]).cuda(self.rank) * self.rank
pg.broadcast([tensor]).wait()
pg.broadcast([tensor]).wait()
# N.B.: destroy_process_group is necessary to wait for
# all pending works to finish.
c10d.destroy_process_group(pg)
self.assertEqual(num_hook_fired, 2)
self.assertEqual(len(durations), 2)
for duration in durations:
self.assertTrue(duration > 0)
self.assertEqual(tensor, torch.zeros([2, 3]).cuda(self.rank))
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_on_completion_hook_seq
|
def test_on_completion_hook_seq(self):
pg = self._get_process_group()
num_hook_fired = 0
seq: int = -1
work: int = 0
def hook(work_info: torch._C._distributed_c10d.WorkInfo):
nonlocal num_hook_fired, seq
num_hook_fired += 1
seq = work_info.seq
pg._register_on_completion_hook(hook)
tensor = torch.ones([2, 3]).cuda(self.rank) * self.rank
work_count = 3
for i in range(work_count):
work += 1
pg.broadcast([tensor]).wait()
# N.B.: destroy_process_group is necessary to wait for
# all pending works to finish.
c10d.destroy_process_group(pg)
self.assertEqual(num_hook_fired, work_count)
self.assertEqual(work, seq)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class WorkHookTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
blocking_wait_error_msg
|
def blocking_wait_error_msg(self):
return "Caught collective operation timeout"
|
def blocking_wait_error_msg(self):
return "timeout"
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class NcclErrorHandlingTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class NcclErrorHandlingTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_nccl_errors_nonblocking
|
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = prev_nccl_async_error_handling
|
def test_nccl_errors_nonblocking(self):
# Note: we unset and restore TORCH_NCCL_ASYNC_ERROR_HANDLING for this test
# since test_c10d_common runs with async error handling by default, but this
# tests behavior when it is not enabled.
prev_nccl_async_error_handling = os.environ.get(
"TORCH_NCCL_ASYNC_ERROR_HANDLING", None
)
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0"
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
# This allreduce does not block Python thread as allreduce enqueues
# the cuda operation, and then wait only blocks the current cuda
# stream.
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
work.wait()
# Now the work scheduled next should hang forever since the previous
# allreduce will never complete.
t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
t.daemon = True
t.start()
t.join(int(get_timeout(self.id()) / 5))
self.assertTrue(t.is_alive())
if prev_nccl_async_error_handling is not None:
os.environ[
"TORCH_NCCL_ASYNC_ERROR_HANDLING"
] = prev_nccl_async_error_handling
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class NcclErrorHandlingTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class NcclErrorHandlingTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_nccl_blocking_wait_with_barrier
|
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
# This should timeout
process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))
|
def test_nccl_blocking_wait_with_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.barrier().wait()
if self.rank == 0:
with self.assertRaisesRegex(dist.DistBackendError, ""):
# It seems the error message would be different depending on
# whether the test is run on CI machine and devGPU. Skipping
# the error message check to make both sides happy.
process_group.barrier().wait(
timeout=timedelta(seconds=self.op_timeout_sec)
)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class NcclErrorHandlingTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class NcclErrorHandlingTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
_check_valid_comm_exception
|
def _check_valid_comm_exception(self, e):
exception_str = str(e)
valid_exceptions = [
"NCCL communicator was aborted",
"NCCL communicator encountered error",
"Caught collective operation timeout"
]
return any(exc in exception_str for exc in valid_exceptions)
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class NcclErrorHandlingTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_nccl_timeout
|
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=10)
)
# Control gloo pg used as go-ahead signal/barrier
# to coordinate btwn ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
if self.rank == 0:
# This should timeout in about 1 second.
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=failed_collective_timeout)
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
# Wait on rank 0 to fail.
try:
pg_gloo.barrier().wait()
except Exception as e:
raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}") from e
# Now verify communicators on this rank have
# been aborted by watchdog.
self._wait_for_comm_abort(process_group, failed_collective_timeout)
|
def test_nccl_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
# Initialize process_group.
process_group = c10d.ProcessGroupNCCL(
store, self.rank, self.world_size, timeout=timedelta(seconds=10)
)
# Control gloo pg used as go-ahead signal/barrier
# to coordinate btwn ranks.
pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
failed_collective_timeout = timedelta(milliseconds=100)
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
timeout=timedelta(seconds=5)
)
if self.rank == 0:
# This should timeout in about 1 second.
# Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
with self.assertRaisesRegex(
dist.DistBackendError, self.blocking_wait_error_msg
):
process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(
timeout=failed_collective_timeout
)
# Now do a barrier to tell other rank to go ahead.
pg_gloo.barrier().wait()
else:
# Wait on rank 0 to fail.
try:
pg_gloo.barrier().wait()
except Exception as e:
raise ValueError(
f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}"
) from e
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class NcclErrorHandlingTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
class NcclErrorHandlingTest(MultiProcessTestCase):
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
test_new_group_local_sync_duplicated_pg
|
def test_new_group_local_sync_duplicated_pg(self):
self._test_new_group_local_sync_duplicate_pg(backend="nccl")
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_scatter_object_list_subgroup
|
def test_scatter_object_list_subgroup(self):
world_size = 4
if self.rank >= world_size:
return
subgroup = self._init_two_pg2_subgroups(world_size)
torch.cuda.set_device(self.rank)
scatter_object_output_list = [None]
expected = [{"rank": self.rank}]
if self.rank == 0 or self.rank == 2:
c10d.scatter_object_list(
scatter_object_output_list=scatter_object_output_list,
scatter_object_input_list=None,
src=self.rank + 1,
group=subgroup,
)
else:
scatter_object_input_list = [
{"rank": self.rank - 1},
{"rank": self.rank},
]
c10d.scatter_object_list(
scatter_object_output_list=scatter_object_output_list,
scatter_object_input_list=scatter_object_input_list,
src=self.rank,
group=subgroup,
)
self.assertEqual(scatter_object_output_list, expected)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
reduce
|
def reduce(xs, rootRank, rootTensor, op=None):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
if op:
opts.reduceOp = op
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.world_size):
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
reduce(tensors, rt, 0)
if self.rank == rt:
self.assertEqual(
torch.tensor([self.world_size * (self.world_size + 1) // 2]),
tensors[0],
)
else:
self.assertEqual(
torch.tensor([self.rank + 1]),
tensors[0],
)
for op, err in zip(
(c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
):
with self.assertRaisesRegex(
ValueError, "Cannot use " + err + " with NCCL"
):
reduce(tensors, self.rank, rt, op)
# Premul sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for factor in (3.0, torch.tensor([5.0], device=local_device_id)):
if isinstance(factor, torch.Tensor):
factor_ref = factor.cpu().item()
else:
factor_ref = factor
float_tensors = [
torch.tensor(
[self.rank + 1.0], device=f"cuda:{local_device_id}"
)
]
float_tensors_ref = [
torch.tensor(
[(self.rank + 1.0) * factor_ref],
device=f"cuda:{local_device_id}",
)
]
reduce(float_tensors_ref, rt, 0)
reduce(float_tensors, rt, 0, c10d._make_nccl_premul_sum(factor))
if self.rank == rt:
self.assertEqual(float_tensors_ref[0], float_tensors[0])
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
allgather
|
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
return work.wait()
tensors = [torch.empty(2, 2).fill_(2).cuda(device=i) for i in local_device_ids]
output_tensors = []
expected_output = []
output_per_gpu = (
[torch.empty(2, 2).fill_(-1)] * len(local_device_ids) * self.world_size
)
expected_per_gpu = (
[torch.empty(2, 2).fill_(2)] * len(local_device_ids) * self.world_size
)
for gpu in local_device_ids:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
result = allgather(output_tensors, tensors)
# Verification
self.assertEqual(output_tensors, expected_output)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
allgather_base
|
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
local_device_id
)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
allgather_base
|
def allgather_base(output_t, input_t):
work = pg._allgather_base(output_t, input_t)
work.wait()
# allgather_base is GPU number agnostic.
# Each rank contribute one tensor regardless of GPU counts
tensor = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(
local_device_id
)
allgather_base(output_t, tensor)
# Verification
self.assertEqual(torch.arange(self.world_size), output_t)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
gather
|
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
for rank in range(self.world_size):
gather(output_ts, tensors, rank)
if rank == self.rank:
self.assertEqual(expected, output_ts)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
gather
|
def gather(output_t, input_t, rootRank):
opts = c10d.GatherOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.gather(output_t, input_t, opts)
else:
work = pg.gather([], input_t, opts)
work.wait()
# init input
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([self.rank]).cuda(device_id))
# init output
output_ts = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
output_ts.append([])
for rank in range(self.world_size):
output_ts[idx].append(torch.tensor([-1]).cuda(gpu_idx))
expected = [[torch.tensor([rank]) for rank in range(self.world_size)]]
for rank in range(self.world_size):
gather(output_ts, tensors, rank)
if rank == self.rank:
self.assertEqual(expected, output_ts)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
scatter
|
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for rank in range(self.world_size):
scatter(tensors, scatter_list, rank)
self.assertEqual(expected, tensors)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
scatter
|
def scatter(output_t, input_t, rootRank):
opts = c10d.ScatterOptions()
opts.rootRank = rootRank
if rootRank == self.rank:
work = pg.scatter(output_t, input_t, opts)
else:
work = pg.scatter(output_t, [], opts)
work.wait()
# init output
tensors = []
for device_id in local_device_ids:
tensors.append(torch.tensor([-1]).cuda(device_id))
# init input
scatter_list = []
for idx in range(num_gpus):
gpu_idx = local_device_ids[idx]
scatter_list.append([])
for rank in range(self.world_size):
scatter_list[idx].append(torch.tensor([rank]).cuda(gpu_idx))
# test each rank to scatter
expected = [torch.tensor([self.rank])]
for rank in range(self.world_size):
scatter(tensors, scatter_list, rank)
self.assertEqual(expected, tensors)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_ddp_set_sparse_metadata
|
def test_ddp_set_sparse_metadata(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
vocab_size = 5
model = SparseCollective.ToyModel(
self.rank, vocab_size=vocab_size, embedding_dim=10
)
ddp_model = DistributedDataParallel(model)
inputs = torch.tensor([[1, 0, 0], [0, 0, 0], [0, 0, 0]]).to(self.rank)
# set sparse metadata on the DDP model
indices = torch.Tensor(list(range(vocab_size)))
ddp_model._set_sparse_metadata({"embedding.weight": indices})
# forward pass
try:
output = ddp_model(inputs)
loss = output.sum()
# backward pass
loss.backward()
self.assertTrue(ddp_model.module.embedding.weight.grad.indices, indices)
except RuntimeError as e:
if "NCCL does not support all_reduce with sparse tensors" in str(e):
pass
else:
# Rethrow the exception if it's a different error
raise
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class SparseCollective(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
local_device
|
def local_device(self):
return torch.device("cuda", self.rank_to_GPU[self.rank][0])
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestBase(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_join_processes
|
def _join_processes(self, fn):
# We need to patch sys.exit() as skip_if will use sys.exit() and
# the exit code from the this process will not be catched.
with mock.patch("sys.exit") as exit_mock:
fn()
super()._join_processes(fn)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestBase(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
wrap
|
def wrap(*positional, args, **kwargs):
args = (next(piter), *args)
return proc(*positional, args=args, **kwargs)
self._start_processes(wrap)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_create_process_group_nccl
|
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
def _create_process_group_nccl(self, store, opts, device_id=None):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts,
device_id=device_id,
)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class ProcessGroupNCCLTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLGroupTest(MultiProcessTestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
_trace_basename
|
def _trace_basename(self):
# we pass the base to the env, and the dump util will append rank
return os.path.join(self.tempdir.name, "trace_")
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestBase(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_allgather_float8
|
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 10, device=torch.device(device))
output_tensor = torch.zeros(10, 10, device=torch.device(device))
dist.reduce_scatter_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
|
def test_allgather_float8(self, float8_dtype):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 16, device=torch.device(device)).to(float8_dtype)
output_tensor = torch.zeros(10, 16, device=torch.device(device)).to(
float8_dtype
)
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor.view(torch.float32), tensor.view(torch.float32))
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NcclProcessGroupWithDispatchedCollectivesTests(
test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
):
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_collectives(self):
self._test_collectives(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_allreduce_coalesced(self):
self._test_allreduce_coalesced(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_all_to_all_single(self):
self._test_all_to_all_single(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(1)
def test_allgather_base(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 10, device=torch.device(device))
output_tensor = torch.zeros(10, 10, device=torch.device(device))
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
@requires_nccl()
@skip_if_lt_x_gpu(1)
@parametrize("float8_dtype", [torch.float8_e4m3fn, torch.float8_e5m2])
def test_allgather_float8(self, float8_dtype):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 16, device=torch.device(device)).to(float8_dtype)
output_tensor = torch.zeros(10, 16, device=torch.device(device)).to(
float8_dtype
)
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor.view(torch.float32), tensor.view(torch.float32))
instantiate_parametrized_tests(NcclProcessGroupWithDispatchedCollectivesTests)
class LargeCommTest(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_ops_nccl.py
|
broadcast
|
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
return xs
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = torch.tensor([self.rank]).cuda(self.rank_to_GPU[self.rank][0])
output = broadcast([x], i, 0)
self.assertEqual(torch.tensor([i]), output[0])
expected_tensor = torch.empty([i + 1, i + 1]).fill_(i + 1)
xs = [
torch.empty([i + 1, i + 1]).fill_(-1).cuda(device=device_idx)
for device_idx in self.rank_to_GPU[self.rank]
]
# test with multiple input tensors (multiple gpu in one rank)
for j in range(len(xs)):
if self.rank == i:
xs[j] = expected_tensor.cuda(device=self.rank_to_GPU[self.rank][j])
broadcast(xs, i, j)
for tensor in xs:
self.assertEqual(tensor, expected_tensor)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
allreduce
|
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.SUM)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1) // 2]),
tensors[0],
)
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([self.rank + 1.0]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1.0) / (2.0 * ndev)]),
tensors[0],
)
# Premul Sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for dtype in torch.half, torch.float, torch.double:
for factor in (
3.0,
torch.tensor([5.0], device=local_device_id, dtype=dtype),
):
tensors = [
torch.tensor([self.rank + 1])
.cuda(local_device_id)
.to(dtype=dtype)
]
allreduce(tensors, c10d._make_nccl_premul_sum(factor))
self.assertEqual(
factor
* torch.tensor(
[self.world_size * (self.world_size + 1) / 2],
dtype=dtype,
device=local_device_id,
),
tensors[0],
)
# Product
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
self.assertEqual(torch.tensor([math.factorial(self.world_size)]), tensors[0])
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MIN)
self.assertEqual(torch.tensor([1]), tensors[0])
# Max
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
for op, err in zip(
(c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
):
with self.assertRaisesRegex(ValueError, "Cannot use " + err + " with NCCL"):
allreduce(tensors, op)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
gather_trace
|
def gather_trace():
e.synchronize()
# give the other thread some time to fill the cuda buffer
time.sleep(5)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
t = t["entries"]
self.assertEqual(t[-1]["profiling_name"], "nccl:all_reduce")
if self.rank == 0:
self.assertEqual(t[-1]["collective_seq_id"], 1)
self.assertEqual(t[-1]["state"], "completed")
else:
self.assertEqual(t[-1]["collective_seq_id"], 2)
self.assertEqual(
t[-1]["state"], self.started_or_scheduled(timing_enabled)
)
self.assertIsNone(t[-1]["time_discovered_completed_ns"])
# this will eventually cause the missing rank 0
# to continue which will unblock the non-zero ranks
self.parent.send("next")
if self.rank != 0:
pg.allreduce(a).wait()
th = threading.Thread(target=gather_trace)
th.start()
# fill the cuda buffer, at around 1024 events
# this will stall
for i in range(2000):
a = a + a
th.join()
else:
gather_trace()
self.assertEqual("next", self.parent.recv())
if self.rank == 0:
pg.allreduce(a).wait()
torch.cuda.synchronize(device=device)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_coalescing_manager_collective
|
def test_coalescing_manager_collective(self, timing_enabled):
"""
The coalescing manager api works by accumulating operations in python via a contextmanager, and then making
one call into c++ to an <op>_coalesced API. It has limited support for ops and has been added recently to
avoid overheads of making individual py-cpp calls. This complicates flight recording..
For now, flight recording of coalescing_manager collectives is less detailed than cpp coalesced collectives.
"""
if self.rank == self.MAIN_PROCESS_RANK:
return
pg = self._create_process_group_nccl()
if timing_enabled:
pg._enable_collectives_timing()
output_tensors = torch.zeros(2, 2).to(self.rank)
input_tensors = [torch.ones(2, 2).to(self.rank) for _ in range(self.world_size)]
# TODO(whc) make this work with bigger world or something
self.assertEqual(self.world_size, 2, self.world_size)
with dist._coalescing_manager():
for i in range(self.world_size):
dist.reduce_scatter_tensor(output_tensors[i], input_tensors[i])
self.assertEqual(output_tensors, input_tensors[self.rank] * self.world_size)
torch.cuda.synchronize(device=self.rank)
if timing_enabled:
# wait for watchdog thread to process the queue of works
time.sleep(1)
t = pickle.loads(torch._C._distributed_c10d._dump_nccl_trace())
self.assertEqual(
len(t["entries"]), 1
) # one for the reduce_scatter_tensor_coalesced, one for the endCoalescing
self.assertEqual(
t["entries"][0]["profiling_name"], "nccl:reduce_scatter_tensor_coalesced"
)
self.assertEqual(t["entries"][0]["collective_seq_id"], 1)
self.assertEqual(t["entries"][0]["input_sizes"], [[2, 2], [2, 2]])
self.assertEqual(
t["entries"][0]["output_sizes"],
[
[
2,
],
[
2,
],
],
)
self.assertEqual(t["entries"][0]["state"], "completed")
if timing_enabled:
duration = t["entries"][0]["duration_ms"]
self.assertTrue(0.001 < duration < 10000, duration)
else:
self.assertTrue("duration_ms" not in t["entries"][0])
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTest(NCCLTraceTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
wrapper
|
def wrapper(self, *args, **kwargs):
for skip in TEST_SKIPS.values():
if self.processes[0].exitcode == skip.exit_code:
return MultiProcessTestCase._check_return_codes(self, *args, **kwargs)
return fn(self, *args, **kwargs)
return wrapper
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_create_process_group_nccl
|
def _create_process_group_nccl(self, store, opts):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
def _create_process_group_nccl(self, store, opts, device_id=None):
# create nccl processgroup with opts
c10d.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=opts,
device_id=device_id,
)
pg = c10d.distributed_c10d._get_default_group()
return pg
|
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_nccl,
requires_gloo,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and
(
(torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) >= 11)
or torch.version.hip is not None
)
)
class ProcessGroupNCCLTest(MultiProcessTestCase):
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
class ProcessGroupNCCLGroupTest(MultiProcessTestCase):
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_nccl.py
|
_check_return_codes
|
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
# but we want rank0 to abort and rank1 to exit cleanly in this test
self.assertEqual(self.processes[0].exitcode, -6)
self.assertEqual(self.processes[1].exitcode, 0)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_wait_process
|
def _wait_process(self, rank, timeout):
try:
self.processes[rank].join(timeout)
return self.processes[rank].exitcode
except TimeoutError:
return None
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_timeout_dumps
|
def test_timeout_dumps(self, timing_enabled):
# dump on heartbeatmonitor thread
os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
# need rank0 to crash before looking for its output file
os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for rank0 to crash before looking for its output file
# we rely on rank0 holding off its abort long enough to dump the debug info
self.assertEqual(self._wait_process(0, timeout=90), -6)
with open(self._trace_name(rank=0), "rb") as f:
t = pickle.load(f)
t = t["entries"]
self.assertEqual(len(t), 2)
self.assertEqual(t[0]["collective_seq_id"], 1)
self.assertEqual(t[0]["state"], "completed")
self.assertEqual(t[1]["collective_seq_id"], 2)
self.assertEqual(
t[1]["state"], self.started_or_scheduled(timing_enabled)
)
self.assertFalse(os.path.exists(self._trace_name(rank=1)))
return
pg = self._create_process_group_nccl()
if timing_enabled:
# we force disabled timing in setup, since there is no 'disable' function
pg._enable_collectives_timing()
device = self.local_device
with torch.cuda.device(device):
a = torch.full((3, 4), float(self.rank), device=device)
pg.allreduce(a).wait()
if self.rank == 0:
pg.allreduce(a).wait()
# rank 0 will crash before it passes the sync, but rank1 will exit quickly and cleanly
torch.cuda.synchronize(device=device)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
@skip_but_pass_in_sandcastle
class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_check_return_codes
|
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
# but we want rank0 to abort and rank1 to exit cleanly in this test
self.assertEqual(self.processes[0].exitcode, -6)
self.assertEqual(self.processes[1].exitcode, 0)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestDumpOnTimeoutBase(NCCLTraceTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
_trace_name
|
def _trace_name(self, rank):
return self._trace_basename() + str(rank)
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestBase(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
started_or_scheduled
|
def started_or_scheduled(self, timing_enabled):
return "started" if timing_enabled else "scheduled"
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class NCCLTraceTestBase(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
open_file_with_timeout
|
def open_file_with_timeout(file_path, mode, timeout=1.0):
start_time = time.time()
while time.time() - start_time < timeout:
if os.path.exists(file_path):
return open(file_path, mode)
time.sleep(0.1)
raise FileNotFoundError
if self.rank == self.MAIN_PROCESS_RANK:
for c in self.children_pipes:
self.assertEqual(c.recv(), "next")
dump_file = self._trace_name(rank=0)
pipe_file = dump_file + ".pipe"
with open_file_with_timeout(pipe_file, "w") as f:
f.write("1\n")
with open_file_with_timeout(dump_file, "rb", timeout=10.0) as f:
self.assertTrue("all_reduce" in str(pickle.load(f)))
for c in self.children_pipes:
c.send("next")
return
pg = self._create_process_group_nccl()
device = self.local_device
a = torch.full((3, 4), float(self.rank), device=device)
for i in range(2):
f = pg.allreduce(a)
f.wait()
torch.cuda.synchronize(device=device)
self.parent.send("next")
self.parent.recv()
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_nccl.py
|
test_comm_recursive_split_group
|
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
def test_comm_recursive_split_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
device = torch.device(f"cuda:{self.rank}")
pg = self._create_process_group_nccl(store, self.opts(), device_id=device)
backend = pg._get_backend(torch.device(device))
# split the default PG into 2 subgroups, each subgroup (ng1) has 4 ranks.
tensor1 = torch.full((1,), self.rank).cuda(device)
ng1 = c10d.split_group(pg, [[0, 1, 2, 3], [4, 5, 6, 7]])
backend1 = ng1._get_backend(torch.device(device))
if self.rank < 4:
dist.broadcast(tensor1, 0, group=ng1)
self.assertEqual(tensor1, torch.full((1,), 0))
else:
dist.broadcast(tensor1, 4, group=ng1)
self.assertEqual(tensor1, torch.full((1,), 4))
# comm split happens eagerly since device_id is passed to init_process_group.
self.assertEqual(backend.comm_split_count(), 1)
self.assertEqual(backend1.comm_split_count(), 0)
# further split ng1 into 2 subgroups, each subgroup (ng2) has 2 ranks.
tensor2 = torch.full((1,), self.rank).cuda(device)
ng2 = c10d.split_group(ng1, [[0, 1], [2, 3]])
backend2 = ng2._get_backend(torch.device(device))
self.assertEqual(backend.comm_split_count(), 1)
self.assertEqual(backend1.comm_split_count(), 1)
self.assertEqual(backend2.comm_split_count(), 0)
# execute collective calls within each 2-rank pg
if self.rank == 0 or self.rank == 1:
dist.broadcast(tensor2, 1, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 1))
if self.rank == 2 or self.rank == 3:
dist.broadcast(tensor2, 2, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 2))
if self.rank == 4 or self.rank == 5:
dist.broadcast(tensor2, 5, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 5))
if self.rank == 6 or self.rank == 7:
dist.broadcast(tensor2, 6, group=ng2)
self.assertEqual(tensor2, torch.full((1,), 6))
# a barrier and a cuda sync before destroying all pgs.
dist.barrier(pg)
torch.cuda.synchronize()
dist.destroy_process_group()
|
import copy
import json
import os
import pickle
import random
import re
import signal
import sys
import tempfile
import threading
import time
import warnings
from contextlib import contextmanager
from datetime import datetime, timedelta
from enum import auto, Enum
from itertools import chain, product
from unittest import mock, SkipTest
import torch
import torch.distributed as c10d
from typing import Dict, List
import test_c10d_common
from test_c10d_common import ConvNet, DoubleGpuNet, gpus_for_rank, ModuleForDdpCommHook
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch._C._distributed_c10d import OpType
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
get_timeout,
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
TEST_SKIPS,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
retry_on_connect_failures,
run_tests,
skip_but_pass_in_sandcastle,
skip_but_pass_in_sandcastle_if,
TEST_CUDA,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
TestCase,
)
BFLOAT16_AVAILABLE = torch.cuda.is_available() and (
(torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11)
or torch.version.hip is not None
)
from torch.distributed.distributed_c10d import _get_process_group_uid
from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter
from torch.testing._internal.common_cuda import SM80OrLater
class ProcessGroupNCCLLargerScaleTest(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/test_c10d_object_collectives.py
|
with_comms
|
def with_comms(func=None):
if func is None:
return partial(
with_comms,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.dist_init()
func(self)
self.destroy_comms()
return wrapper
class TestObjectCollectives(MultiProcessTestCase):
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return torch.device(self.rank) if BACKEND == dist.Backend.NCCL \
else torch.device("cpu")
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def dist_init(self):
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
@with_comms()
def test_all_gather_object(self):
output = [None] * dist.get_world_size()
dist.all_gather_object(
object_list=output,
obj=self.rank)
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_gather_object(self):
output = [None] * dist.get_world_size() if self.rank == 0 else None
dist.gather_object(
obj=self.rank,
object_gather_list=output)
if self.rank == 0:
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_broadcast_object_list(self):
val = 99 if self.rank == 0 else None
object_list = [val] * dist.get_world_size()
# TODO test with broadcast_object_list's device argument
dist.broadcast_object_list(object_list=object_list)
self.assertEqual(99, object_list[0])
@with_comms()
def test_scatter_object_list(self):
input_list = list(range(dist.get_world_size())) if self.rank == 0 else None
output_list = [None]
dist.scatter_object_list(
scatter_object_output_list=output_list,
scatter_object_input_list=input_list)
self.assertEqual(self.rank, output_list[0])
if __name__ == "__main__":
run_tests()
|
def with_comms(func=None):
if func is None:
return partial(
with_comms,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.dist_init()
func(self)
self.destroy_comms()
return wrapper
class TestObjectCollectives(MultiProcessTestCase):
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return (
torch.device(self.rank)
if BACKEND == dist.Backend.NCCL
else torch.device("cpu")
)
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def dist_init(self):
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
@with_comms()
def test_all_gather_object(self):
output = [None] * dist.get_world_size()
dist.all_gather_object(object_list=output, obj=self.rank)
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_gather_object(self):
output = [None] * dist.get_world_size() if self.rank == 0 else None
dist.gather_object(obj=self.rank, object_gather_list=output)
if self.rank == 0:
for i, v in enumerate(output):
self.assertEqual(i, v, f"rank: {self.rank}")
@with_comms()
def test_send_recv_object_list(self):
val = 99 if self.rank == 0 else None
object_list = [val] * dist.get_world_size()
if self.rank == 0:
dist.send_object_list(object_list, 1)
if self.rank == 1:
dist.recv_object_list(object_list, 0)
if self.rank < 2:
self.assertEqual(99, object_list[0])
else:
self.assertEqual(None, object_list[0])
@with_comms()
def test_broadcast_object_list(self):
val = 99 if self.rank == 0 else None
object_list = [val] * dist.get_world_size()
# TODO test with broadcast_object_list's device argument
dist.broadcast_object_list(object_list=object_list)
self.assertEqual(99, object_list[0])
@with_comms()
def test_scatter_object_list(self):
input_list = list(range(dist.get_world_size())) if self.rank == 0 else None
output_list = [None]
dist.scatter_object_list(
scatter_object_output_list=output_list, scatter_object_input_list=input_list
)
self.assertEqual(self.rank, output_list[0])
# Test Object Collectives With Sub Pg
def setup_sub_pg(self):
rank = dist.get_rank()
base_rank = rank - (rank % 2)
ranks = [base_rank, base_rank + 1]
my_pg = dist.new_group(ranks, use_local_synchronization=True)
return rank, ranks, my_pg
@with_comms()
def test_subpg_scatter_object(self):
rank, ranks, my_pg = self.setup_sub_pg()
out_list = [None]
dist.scatter_object_list(out_list, ranks, src=ranks[0], group=my_pg)
self.assertEqual(rank, out_list[0])
@with_comms()
def test_subpg_all_gather_object(self):
rank, ranks, my_pg = self.setup_sub_pg()
out_list = [None] * len(ranks)
dist.all_gather_object(out_list, rank, group=my_pg)
self.assertEqual(ranks, out_list)
@with_comms()
def test_subpg_gather_object(self):
rank, ranks, my_pg = self.setup_sub_pg()
out_list = [None] * len(ranks) if rank == ranks[0] else None
dist.gather_object(rank, out_list, dst=ranks[0], group=my_pg)
if rank == ranks[0]:
self.assertEqual(ranks, out_list)
@with_comms()
def test_subpg_broadcast_object(self):
rank, ranks, my_pg = self.setup_sub_pg()
out_list = [None]
if rank == ranks[0]:
out_list[0] = rank
dist.broadcast_object_list(out_list, src=ranks[0], group=my_pg)
self.assertEqual(ranks[0], out_list[0])
if __name__ == "__main__":
run_tests()
|
import os
import sys
from functools import wraps, partial
import torch
import torch.distributed as dist
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
TEST_SKIPS
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
|
import os
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_ops_nccl.py
|
reduce_scatter_base
|
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
ValueError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
TypeError, "input tensor must be the same type as the output tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
reduce_scatter
|
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
output = [torch.tensor([0]).cuda(i) for i in local_device_ids]
# GPU/rank
# 0 [1], [2], [3], [4]
# 1 [2], [3], [4], [5]
# 2 [3], [4], [5], [6]
# 3 [4], [5], [6], [7]
# Sum
tensor_lists = []
input_per_gpu = []
for i in range(self.world_size):
input_per_gpu.append(torch.tensor([self.rank + i + 1]))
for gpu in local_device_ids:
tensor_lists.append([t.cuda(device=gpu) for t in input_per_gpu])
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(num_gpus):
expected = torch.tensor(
[
(1 + self.world_size) * self.world_size // 2
+ self.world_size * self.rank
]
)
self.assertEqual(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(num_gpus):
expected = torch.tensor([self.rank + 1 + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(num_gpus):
expected = torch.tensor([self.rank + self.world_size + i])
self.assertEqual(expected, output[i])
# Product
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
# math package don't have math.perm until python 3.8, so
# we implement a naive version here.
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
perm
|
def perm(n, k):
prod_val = n
for val in range(n - k + 1, n):
prod_val *= val
return prod_val
for i in range(num_gpus):
prod_val = perm(self.rank + self.world_size, self.world_size)
expected = torch.tensor([prod_val])
self.assertEqual(expected, output[i])
# Test the input params overridden scenarios, aka, when the input is
# a list and output is just one tensor.
# Sum
output_tensor = torch.empty_like(input_per_gpu[0][0]).cuda(self.rank)
input_list = [tensor[0].cuda(self.rank) for tensor in input_per_gpu]
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.SUM).wait()
expected = torch.tensor(
(1 + self.world_size) * self.world_size // 2 + self.world_size * self.rank
)
self.assertEqual(expected, output_tensor)
# Min
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MIN).wait()
expected = torch.tensor(self.rank + 1)
self.assertEqual(expected, output_tensor)
# Max
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.MAX).wait()
expected = torch.tensor(self.rank + self.world_size)
self.assertEqual(expected, output_tensor)
# Product
pg.reduce_scatter(output_tensor, input_list, c10d.ReduceOp.PRODUCT).wait()
prod_val = self.rank + 1
for k in range(1, self.world_size):
prod_val = prod_val * (self.rank + 1 + k)
expected = torch.tensor(prod_val)
self.assertEqual(expected, output_tensor)
if torch.cuda.nccl.version() >= (2, 11, 1):
for factor in (3.0, torch.tensor([5.0], device=self.rank)):
if isinstance(factor, torch.Tensor):
factor_ref = factor.cpu().item()
else:
factor_ref = factor
output = [t.float() for t in output]
tensor_lists = [[t.float() for t in tl] for tl in tensor_lists]
output_ref = [t.float() for t in output]
tensor_lists_ref = [
[t.float() * factor_ref for t in tl] for tl in tensor_lists
]
reduce_scatter(output, tensor_lists, c10d._make_nccl_premul_sum(factor))
reduce_scatter(output_ref, tensor_lists_ref, c10d.ReduceOp.SUM)
self.assertEqual(output_ref, output)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
reduce_scatter_base
|
def reduce_scatter_base(output_t, input_t):
work = pg._reduce_scatter_base(output_t, input_t)
work.wait()
# anticipate an error
with self.assertRaisesRegex(
ValueError,
"input tensor must be the same size as output size times world size",
):
input_t = torch.tensor([self.rank]).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
local_device_id
)
# fails the check because output_t is not correctly sized
reduce_scatter_base(output_t, input_t)
# anticipate an error
with self.assertRaisesRegex(
TypeError, "input tensor must be the same type as the output tensor."
):
tensor = torch.tensor([self.rank], dtype=torch.float).cuda(local_device_id)
output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
local_device_id
)
# fails the check because the dtype is different
reduce_scatter_base(output_t, tensor)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_ops_nccl.py
|
allreduce
|
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.SUM)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1) // 2]),
tensors[0],
)
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([self.rank + 1.0]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.AVG)
ndev = self.world_size
self.assertEqual(
torch.tensor([ndev * (ndev + 1.0) / (2.0 * ndev)]),
tensors[0],
)
# Premul Sum
if torch.cuda.nccl.version() >= (2, 11, 1):
for dtype in torch.half, torch.float, torch.double:
for factor in (
3.0,
torch.tensor([5.0], device=local_device_id, dtype=dtype),
):
tensors = [
torch.tensor([self.rank + 1])
.cuda(local_device_id)
.to(dtype=dtype)
]
allreduce(tensors, c10d._make_nccl_premul_sum(factor))
self.assertEqual(
factor
* torch.tensor(
[self.world_size * (self.world_size + 1) / 2],
dtype=dtype,
device=local_device_id,
),
tensors[0],
)
# Product
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.PRODUCT)
self.assertEqual(torch.tensor([math.factorial(self.world_size)]), tensors[0])
# Min
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MIN)
self.assertEqual(torch.tensor([1]), tensors[0])
# Max
tensors = [torch.tensor([self.rank + 1]).cuda(local_device_id)]
allreduce(tensors, c10d.ReduceOp.MAX)
self.assertEqual(torch.tensor([self.world_size]), tensors[0])
for op, err in zip(
(c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR),
("ReduceOp.BAND", "ReduceOp.BOR", "ReduceOp.BXOR"),
):
with self.assertRaisesRegex(ValueError, "Cannot use " + err + " with NCCL"):
allreduce(tensors, op)
|
import math
import os
import sys
import tempfile
import torch
import torch.distributed as c10d
import torch.distributed as dist
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
skip_but_pass_in_sandcastle_if,
skipIfRocm,
TEST_WITH_DEV_DBG_ASAN,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_object_collectives.py
|
test_subpg_broadcast_object
|
if __name__ == "__main__":
run_tests()
|
def test_subpg_broadcast_object(self):
rank, ranks, my_pg = self.setup_sub_pg()
out_list = [None]
if rank == ranks[0]:
out_list[0] = rank
dist.broadcast_object_list(out_list, src=ranks[0], group=my_pg)
self.assertEqual(ranks[0], out_list[0])
|
import os
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
class TestObjectCollectives(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.