library (stringclasses: 1 value) | test_file (stringclasses: 785 values) | test_function (stringlengths: 1-295) | before (stringlengths: 0-448k) | after (stringlengths: 0-487k) | context_before (stringclasses: 947 values) | context_after (stringlengths: 0-16.3k) | commit_before (stringclasses: 1 value) | commit_after (stringclasses: 1 value) | change_type (stringclasses: 3 values) |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/distributed/_tensor/test_op_strategy.py
|
test_mm_1d_mesh
|
def test_mm_1d_mesh(self):
mesh = self.build_device_mesh()
all_strats = gen_einsum_strategies("mk,kn->mn", mesh)
self.assertEqual(len(all_strats.strategies), 4)
|
from itertools import chain
import torch
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import (
DTensorSpec,
Partial,
Replicate,
Shard,
TensorMeta,
)
from torch.distributed.tensor._collective_utils import redistribute_cost
from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy
from torch.distributed.tensor._ops._einsum_strategy import (
EinsumDims,
gen_einsum_strategies,
)
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase
class TestEinsumStrategies(DTensorOpTestBase):
from torch.distributed.tensor._ops._matrix_ops import addmm_strategy
from torch.distributed.tensor._ops._matrix_ops import mm_strategy
from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_op_strategy.py
|
test_mm_2d_mesh
|
def test_mm_2d_mesh(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size).reshape(2, 2))
all_strats = gen_einsum_strategies("mk,kn->mn", mesh)
self.assertEqual(len(all_strats.strategies), 16)
|
from itertools import chain
import torch
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import (
DTensorSpec,
Partial,
Replicate,
Shard,
TensorMeta,
)
from torch.distributed.tensor._collective_utils import redistribute_cost
from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy
from torch.distributed.tensor._ops._einsum_strategy import (
EinsumDims,
gen_einsum_strategies,
)
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase
class TestEinsumStrategies(DTensorOpTestBase):
from torch.distributed.tensor._ops._matrix_ops import addmm_strategy
from torch.distributed.tensor._ops._matrix_ops import mm_strategy
from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_op_strategy.py
|
test_bmm_1d_mesh
|
def test_bmm_1d_mesh(self):
mesh = self.build_device_mesh()
all_strats = gen_einsum_strategies("bmk,bkn->bmn", mesh)
self.assertEqual(len(all_strats.strategies), 5)
|
from itertools import chain
import torch
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.placement_types import (
DTensorSpec,
Partial,
Replicate,
Shard,
TensorMeta,
)
from torch.distributed.tensor._collective_utils import redistribute_cost
from torch.distributed.tensor._op_schema import OpSchema, OpStrategy, PlacementStrategy
from torch.distributed.tensor._ops._einsum_strategy import (
EinsumDims,
gen_einsum_strategies,
)
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import DTensorOpTestBase
class TestEinsumStrategies(DTensorOpTestBase):
from torch.distributed.tensor._ops._matrix_ops import addmm_strategy
from torch.distributed.tensor._ops._matrix_ops import mm_strategy
from torch.distributed.tensor._ops._matrix_ops import bmm_strategy
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_optimizers.py
|
output_fn
|
def output_fn(mod, outputs, device_mesh):
assert isinstance(outputs, DTensor)
return outputs.redistribute(placements=[Replicate()] * device_mesh.ndim).to_local()
class TestDTensorOptimizer(DTensorTestBase):
def _assert_optimizer(
self,
mesh,
model,
optim,
dist_model,
dist_optim,
inputs,
*,
rtol: float = 1.3e-6,
atol: float = 1e-5,
):
for iter_idx in range(2):
# run forward/backward/optim for original model
optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
out = model(inputs)
loss = out.sum()
loss.backward()
optim.step()
# run forward/backward/optim for distributed model
dist_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
dist_out = dist_model(inputs)
dist_loss = dist_out.sum()
dist_loss.backward()
dist_optim.step()
# check that the optimizer update parameters with same numerics
for p1, p2 in zip(model.parameters(), dist_model.parameters()):
p2 = p2.full_tensor()
# Default 'rtol' and 'atol' for attr:`~torch.float32` are ``1.3e-6`` and ``1e-5``
self.assertEqual(p1, p2, atol=atol, rtol=rtol)
def test_optimizer_foreach_supported_types_include_DTensor(self):
from torch.optim.optimizer import _foreach_supported_types
self.assertTrue(DTensor in _foreach_supported_types)
@with_comms
def test_adam_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
# lr as a Tensor is not supported for capturable=False and foreach=True
adam_float_lr_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{"lr": 0.1, "weight_decay": 0.05, "amsgrad": True},
{
"lr": 0.1,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
},
]
fused_adam_float_lr_configs = [
{"lr": 0.1, "fused": True},
{"lr": 0.1, "weight_decay": 0.05, "amsgrad": True, "fused": True},
{
"lr": 0.1,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
"fused": True,
},
]
# lr could be a Tensor or a float when fused=True for adam optimizer
fused_adam_tensor_lr_configs = [
{**config, "lr": torch.tensor(0.1)}
for config in fused_adam_float_lr_configs
]
fused_adam_tensor_lr_configs.extend(
[
{**config, "lr": torch.tensor([0.1])}
for config in fused_adam_float_lr_configs
]
)
adam_configs = [
*adam_float_lr_configs,
*fused_adam_float_lr_configs,
*fused_adam_tensor_lr_configs,
]
for config in adam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adamw_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
# lr as a Tensor is not supported for capturable=False and foreach=True
adamw_float_lr_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
},
]
fused_adamw_float_lr_configs = [
{"lr": 0.1, "weight_decay": 0.05, "fused": True},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"amsgrad": True,
"fused": True,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
"amsgrad": True,
"fused": True,
},
]
# lr could be a Tensor or a float when fused=True for adamW optimizer
fused_adamw_tensor_lr_configs = [
{**config, "lr": torch.tensor(0.1)}
for config in fused_adamw_float_lr_configs
]
fused_adamw_tensor_lr_configs.extend(
[
{**config, "lr": torch.tensor([0.1])}
for config in fused_adamw_float_lr_configs
]
)
adamw_configs = [
*adamw_float_lr_configs,
*fused_adamw_float_lr_configs,
*fused_adamw_tensor_lr_configs,
]
for config in adamw_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.AdamW(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.AdamW(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_sgd_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
sgd_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "momentum": 0.05, "foreach": False},
{"lr": 0.1, "momentum": 0.05},
{"lr": 0.1, "momentum": 0.06, "dampening": 0.07},
{
"lr": 0.1,
"momentum": 0.08,
"weight_decay": 0.05,
"nesterov": True,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"momentum": 0.08,
"weight_decay": 0.05,
"nesterov": True,
"maximize": True,
},
]
for config in sgd_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.SGD(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.SGD(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adagrad_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
adagrad_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "lr_decay": 0.05, "foreach": False},
{"lr": 0.1, "lr_decay": 0.02, "weight_decay": 0.05, "foreach": False},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"lr_decay": 0.02,
"weight_decay": 0.05,
"initial_accumulator_value": 0.03,
"eps": 1e-6,
"maximize": True,
},
]
for config in adagrad_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adagrad(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adagrad(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_RMSprop_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
RMSprop_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "alpha": 0.85, "foreach": False},
{"lr": 0.1, "alpha": 0.88, "eps": 1e-6, "foreach": False},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"maximize": True,
"foreach": False,
},
{
"lr": 0.1,
"alpha": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"momentum": 0.9,
"centered": True,
"maximize": True,
},
]
for config in RMSprop_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.RMSprop(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.RMSprop(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adadelta_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
adadelta_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "rho": 0.85, "foreach": False},
{"lr": 0.1, "rho": 0.88, "eps": 1e-5, "foreach": False},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"rho": 0.88,
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
},
]
for config in adadelta_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adadelta(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adadelta(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_nadam_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
nadam_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"decoupled_weight_decay": True,
},
]
for config in nadam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.NAdam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.NAdam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_radam_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
radam_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "weight_decay": 0.05, "foreach": False},
{
"lr": 0.1,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"decoupled_weight_decay": True,
},
]
for config in radam_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.RAdam(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.RAdam(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_adamax_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
adamax_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "betas": (0.6, 0.66), "foreach": False},
{"lr": 0.1, "betas": (0.6, 0.66), "eps": 1e-6, "foreach": False},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
},
{
"lr": 0.1,
"betas": (0.6, 0.66),
"eps": 1e-6,
"weight_decay": 0.05,
"maximize": True,
},
]
for config in adamax_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.Adamax(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.Adamax(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
self._assert_optimizer(mesh, mod, opt, dist_mod, dist_opt, inp)
@with_comms
def test_asgd_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
asgd_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "t0": 1e5, "foreach": False},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
"maximize": True,
},
]
for config in asgd_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.ASGD(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.ASGD(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
# TODO: We want to keep a unit test for ASGD optimizer for the time being, but we need to look into why
# when using ASGD we need higher atol and rtol when comparing model parameters.
# Default 'rtol' and 'atol' for attr:`~torch.float32` are ``1.3e-6`` and ``1e-5``
# Pointer here: https://github.com/pytorch/pytorch/blob/main/torch/testing/_comparison.py#L65
self._assert_optimizer(
mesh, mod, opt, dist_mod, dist_opt, inp, atol=1.3e-5, rtol=1e-4
)
if __name__ == "__main__":
run_tests()
|
from copy import deepcopy
import torch
import torch.nn as nn
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
from torch.optim.optimizer import _foreach_supported_types
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_embedding_ops.py
|
test_multiple_embeddings_rowwise
|
def test_multiple_embeddings_rowwise(self):
mesh = self.build_device_mesh()
inp = torch.randint(0, 10, (4, 4), device=self.device_type)
replicated_inp = DTensor.from_local(inp, mesh, [Replicate()], run_check=False)
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
# case 1: two embeddings with the same shape, thus sharing the underying _MaskPartial
# and MaskBuffer, because of cache hit from sharding propagation
emb1 = torch.nn.Embedding(10, 23, device=self.device_type)
sharded_emb1 = self._apply_sharding(emb1, 0, mesh)
output1 = sharded_emb1(replicated_inp)
emb2 = torch.nn.Embedding(10, 29, device=self.device_type)
sharded_emb2 = self._apply_sharding(emb2, 0, mesh)
output2 = sharded_emb2(replicated_inp)
partial_placement1 = output1.placements[0]
self.assertIsInstance(partial_placement1, _MaskPartial)
output1.full_tensor()
partial_placement2 = output2.placements[0]
self.assertIsInstance(partial_placement2, _MaskPartial)
output2.full_tensor()
self.assertTrue(id(partial_placement1), id(partial_placement2))
# case 2: two embeddings with the same logical_dim_size, but different logical_shape
# thus they will have different _MaskPartial placements (with no cache hit)
emb3 = torch.nn.Embedding(10, 29, device=self.device_type)
sharded_emb3 = self._apply_sharding(emb3, 0, mesh)
output3 = sharded_emb3(replicated_inp)
partial_placement3 = output3.placements[0]
self.assertIsInstance(partial_placement3, _MaskPartial)
output2.full_tensor()
# not equal because of different logical_shape, despite of same logical_dim_size
self.assertNotEqual(partial_placement1, partial_placement3)
|
import sys
import torch
from torch.distributed._tensor import (
distribute_module,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol = torch.ops.c10d_functional
class TestEmbeddingOp(DTensorTestBase):
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_experimental_ops.py
|
test_nll
|
def test_nll(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
shard_spec = [Replicate()]
pred_list = torch.rand(ITER_TIME, 1024, 10)
target_list = torch.randint(0, 10, (ITER_TIME, 1024), dtype=torch.long)
criterion = torch.nn.CrossEntropyLoss()
for i in range(ITER_TIME):
pred = pred_list[i].to(self.device_type).requires_grad_()
target = target_list[i].to(self.device_type)
# nll with dtensor
pred_dtensor = distribute_tensor(pred, device_mesh, shard_spec)
target_dtensor = distribute_tensor(target, device_mesh, shard_spec)
loss = criterion(pred_dtensor, target_dtensor)
loss.backward()
# nll with plain tensor
loss_gt = criterion(pred, target)
loss_gt.backward()
loss_diff_abs = loss.to_local() - loss_gt
loss_diff_rel = loss_diff_abs / (torch.abs(loss_gt) + 1e-8)
loss_mse_abs = torch.mean(loss_diff_abs * loss_diff_abs).item()
loss_mse_rel = torch.mean(loss_diff_rel * loss_diff_rel).item()
grad_diff_abs = pred_dtensor.grad.to_local() - pred.grad
grad_diff_rel = grad_diff_abs / (torch.abs(pred.grad) + 1e-8)
grad_mse_abs = torch.mean(grad_diff_abs * grad_diff_abs).item()
grad_mse_rel = torch.mean(grad_diff_rel * grad_diff_rel).item()
self.assertTrue(
loss_mse_abs <= 1e-6,
f"Too large absolute mse for loss, expected less equal 1e-6, got {loss_mse_abs}",
)
self.assertTrue(
loss_mse_rel <= 1e-6,
f"Too large relative mse for loss, expected less equal 1e-6, got {loss_mse_rel}",
)
self.assertTrue(
grad_mse_abs <= 1e-6,
f"Too large absolute mse for gradient, expected less equal 1e-6, got {grad_mse_abs}",
)
self.assertTrue(
grad_mse_rel <= 1e-6,
f"Too large relative mse for gradient, expected less equal 1e-6, got {grad_mse_rel}",
)
|
import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
ITER_TIME = 10
LR = 0.001
class DistOtherOpsTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_init.py
|
test_init_ops
|
def test_init_ops(self):
self._run_init_op(
torch.nn.init.kaiming_uniform_,
a=0,
mode="fan_in",
nonlinearity="leaky_relu",
)
self._run_init_op(torch.nn.init.normal_, mean=1.5, std=0.8)
self._run_init_op(torch.nn.init.uniform_, a=0, b=1.2)
self._run_init_op(torch.nn.init.constant_, 2.4)
|
def test_init_ops(self):
# NOTE: random init tests are moved to test_random_ops.py
self._run_init_op(torch.nn.init.constant_, 2.4)
|
import torch
from torch.distributed._tensor import (
DTensor,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class DTensorInitOpsTest(DTensorTestBase):
|
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard, zeros
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class DTensorInitOpsTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_tensor/test_init.py
|
world_size
|
def world_size(self):
return 4
|
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard, zeros
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class DTensorConstructorTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_pointwise_ops.py
|
test_dropout_errors
|
def test_dropout_errors(self):
device_mesh = self.build_device_mesh()
with self.assertRaisesRegex(RuntimeError, "supported"):
self._run_sharded_elementwise_ops(
device_mesh=device_mesh,
placements=[_Partial(ReduceOp.SUM)],
input_size=(8, 5),
op=torch.nn.functional.dropout,
)
|
def test_dropout_errors(self):
device_mesh = self.build_device_mesh()
with self.assertRaisesRegex(RuntimeError, "supported"):
self._run_sharded_elementwise_ops(
device_mesh=device_mesh,
placements=[Partial("sum")],
input_size=(8, 5),
op=torch.nn.functional.dropout,
)
|
from typing import Any, Callable, Dict, Optional, Sequence
from unittest import skip
import torch
import torch.utils._pytree as pytree
from torch import Tensor
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import (
_Partial,
Placement,
Replicate,
Shard,
)
from torch.distributed.distributed_c10d import ReduceOp
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorOpTestBase,
skip_unless_torch_gpu,
)
class DistElementwiseOpsTest(DTensorOpTestBase):
|
from typing import Any, Callable, Dict, Optional, Sequence
from unittest import skip
import torch
import torch.utils._pytree as pytree
from torch import Tensor
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import (
Partial,
Placement,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorOpTestBase,
skip_unless_torch_gpu,
)
class DistElementwiseOpsTest(DTensorOpTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_tensor/test_random_ops.py
|
test_init_ops
|
def test_init_ops(self):
self._run_init_op(
torch.nn.init.kaiming_uniform_,
a=0,
mode="fan_in",
nonlinearity="leaky_relu",
)
self._run_init_op(torch.nn.init.normal_, mean=1.5, std=0.8)
self._run_init_op(torch.nn.init.uniform_, a=0, b=1.2)
for dtype in (torch.float32, torch.float16):
self._run_init_op(torch.rand_like, dtype=dtype)
self._run_init_op(torch.randn_like, dtype=dtype)
self._run_init_op(torch.randint_like, low=0, high=100, dtype=dtype)
|
import itertools
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor._utils import compute_local_shape_and_global_offset
from torch.distributed._tensor.api import distribute_tensor
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.distributed_c10d import broadcast_object_list
from torch.distributed.tensor._random import is_rng_supported_mesh, manual_seed
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
skip_unless_torch_gpu,
with_comms,
)
class DistTensorRandomInitTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_dtensor_ops.py
|
assert_ref_dtensor_equal
|
def assert_ref_dtensor_equal(self, dtensor_rs, rs):
flat_dtensor_rs, _ = tree_flatten(dtensor_rs)
flat_rs, _ = tree_flatten(rs)
self.assertEqual(len(flat_dtensor_rs), len(flat_rs))
for dtensor_r, r in zip(flat_dtensor_rs, flat_rs):
if not isinstance(r, torch.Tensor):
continue
self.assertIsInstance(dtensor_r, torch.Tensor)
self.assertEqualOnRank(
dtensor_r.shape,
r.shape,
f"Shape mismatch! original shape:{r.shape}, dtensor shape: {dtensor_r.shape}",
)
self.assertEqualOnRank(
dtensor_r.requires_grad,
r.requires_grad,
"op result requires_grad mismatch!"
f"original requires_grad: {r.requires_grad}, "
f"dtensor requires_grad: {dtensor_r.requires_grad}",
)
self.assertEqualOnRank(dtensor_r.to_local(), r)
|
def assert_ref_dtensor_equal(self, dtensor_rs, rs):
flat_dtensor_rs = pytree.tree_leaves(dtensor_rs)
flat_rs = pytree.tree_leaves(rs)
self.assertEqual(len(flat_dtensor_rs), len(flat_rs))
for dtensor_r, r in zip(flat_dtensor_rs, flat_rs):
if not isinstance(r, torch.Tensor):
continue
self.assertIsInstance(dtensor_r, torch.Tensor)
self.assertEqualOnRank(
dtensor_r.shape,
r.shape,
f"Shape mismatch! original shape:{r.shape}, dtensor shape: {dtensor_r.shape}",
)
self.assertEqualOnRank(
dtensor_r.requires_grad,
r.requires_grad,
"op result requires_grad mismatch!"
f"original requires_grad: {r.requires_grad}, "
f"dtensor requires_grad: {dtensor_r.requires_grad}",
)
self.assertEqualOnRank(dtensor_r, r)
|
import unittest
import warnings
import torch
import torch.distributed as dist
import torch.testing._internal.common_methods_invocations as common_ops
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate
from torch.overrides import resolve_name
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
DecorateInfo,
op_db,
)
from torch.testing._internal.common_utils import (
run_tests,
suppress_warnings,
TEST_WITH_ASAN,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorOpTestBase,
)
from torch.utils._pytree import tree_flatten, tree_map
common_ops.L = 24
common_ops.M = 12
common_ops.S = 4
common_ops.XS = 2
dtensor_fails = {
# these sometimes pass and sometimes fail
# we need to remove many of them from list once op
# get full support with varying sharding specs
xfail("__getitem__"),
xfail("__rsub__"),
xfail("_native_batch_norm_legit"),
xfail("_softmax_backward_data"),
xfail("addbmm"),
xfail("addmv"),
xfail("addr"),
xfail("all"),
xfail("allclose"),
xfail("amax"),
xfail("amin"),
xfail("aminmax"),
xfail("any"),
xfail("arange"),
xfail("argmax"),
xfail("argmin"),
xfail("argsort"),
xfail("as_strided"),
xfail("as_strided", "partial_views"),
xfail("as_strided_scatter"),
xfail("baddbmm"),
xfail("bernoulli"),
xfail("block_diag"),
xfail("broadcast_shapes"),
xfail("cauchy"),
xfail("cartesian_prod"),
xfail("cdist"),
xfail("cholesky"),
xfail("cholesky_inverse"),
xfail("cholesky_solve"),
xfail("chunk"),
xfail("clamp"),
xfail("clamp_max"),
xfail("clamp_min"),
xfail("combinations"),
xfail("complex"),
xfail("constant_pad_nd"),
xfail("corrcoef"),
xfail("count_nonzero"),
xfail("cov"),
xfail("cross"),
xfail("cummax"),
xfail("cummin"),
xfail("cumsum"),
xfail("cumulative_trapezoid"),
xfail("diag"),
xfail("diag_embed"),
xfail("diagflat"),
xfail("diagonal"),
xfail("diagonal_copy"),
xfail("diagonal_scatter"),
xfail("dist"),
xfail("dot"),
xfail("einsum"),
xfail("empty"),
xfail("empty_like"),
xfail("exponential"),
xfail("eye"),
xfail("fft.fft2"),
xfail("fft.fft"),
xfail("fft.fftn"),
xfail("fft.fftshift"),
xfail("fft.ifft2"),
xfail("fft.ifft"),
xfail("fft.ifftshift"),
xfail("fft.ihfft2"),
xfail("fft.ihfft"),
xfail("fft.ihfftn"),
xfail("fft.irfft2"),
xfail("fft.irfftn"),
xfail("fft.rfft2"),
xfail("fft.rfft"),
xfail("fft.rfftn"),
xfail("fill"),
xfail("flip"),
xfail("fliplr"),
xfail("flipud"),
xfail("floor_divide"),
xfail("fmax"),
xfail("fmin"),
xfail("frexp"),
xfail("full"),
xfail("full_like"),
xfail("gather"),
xfail("geometric"),
xfail("geqrf"),
xfail("grid_sampler_2d"),
xfail("gradient"),
xfail("heaviside"),
xfail("histc"),
xfail("histogram"),
xfail("histogramdd"),
xfail("index_add"),
xfail("index_copy"),
xfail("index_fill"),
xfail("index_put"),
xfail("index_reduce"),
xfail("index_select"),
xfail("isin"),
xfail("isinf"),
xfail("isneginf"),
xfail("isposinf"),
xfail("kthvalue"),
xfail("linalg.cholesky"),
xfail("linalg.cholesky_ex"),
xfail("linalg.cond"),
xfail("linalg.cross"),
xfail("linalg.det"),
xfail("linalg.det", "singular"),
xfail("linalg.eig"),
xfail("linalg.eigh"),
xfail("linalg.eigvals"),
xfail("linalg.eigvalsh"),
xfail("linalg.householder_product"),
xfail("linalg.inv"),
xfail("linalg.inv_ex"),
xfail("linalg.ldl_factor"),
xfail("linalg.ldl_factor_ex"),
xfail("linalg.ldl_solve"),
xfail("linalg.lstsq"),
xfail("linalg.lstsq", "grad_oriented"),
xfail("linalg.lu"),
xfail("linalg.lu_factor"),
xfail("linalg.lu_factor_ex"),
xfail("linalg.lu_solve"),
xfail("linalg.matrix_norm"),
xfail("linalg.matrix_power"),
xfail("linalg.matrix_rank"),
xfail("linalg.matrix_rank", "hermitian"),
xfail("linalg.multi_dot"),
xfail("linalg.norm"),
xfail("linalg.norm", "subgradients_at_zero"),
xfail("linalg.pinv"),
xfail("linalg.pinv", "hermitian"),
xfail("linalg.qr"),
xfail("linalg.slogdet"),
xfail("linalg.solve"),
xfail("linalg.solve_ex"),
xfail("linalg.solve_triangular"),
xfail("linalg.svd"),
xfail("linalg.svdvals"),
xfail("linalg.tensorinv"),
xfail("linalg.tensorsolve"),
xfail("linalg.vander"),
xfail("linalg.vecdot"),
xfail("linalg.vector_norm"),
xfail("linspace"),
xfail("log_normal"),
xfail("log_softmax"),
xfail("log_softmax", "with_dtype"),
xfail("logcumsumexp"),
xfail("logdet"),
xfail("logspace"),
xfail("logsumexp"),
xfail("lt"),
xfail("lu"),
xfail("lu_solve"),
xfail("lu_unpack"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("masked.amax"),
xfail("masked.amin"),
xfail("masked.argmax"),
xfail("masked.argmin"),
xfail("masked.cumprod"),
xfail("masked.cumsum"),
xfail("masked.log_softmax"),
xfail("masked.logaddexp"),
xfail("masked.logsumexp"),
xfail("masked.median"),
xfail("masked.norm"),
xfail("masked.prod"),
xfail("masked.softmin"),
xfail("masked.softmax"),
xfail("masked.sum"),
xfail("matrix_exp"),
xfail("max", "binary"),
xfail("max", "reduction_no_dim"),
xfail("max", "reduction_with_dim"),
xfail("maximum"),
xfail("median"),
xfail("min", "binary"),
xfail("min", "reduction_no_dim"),
xfail("min", "reduction_with_dim"),
xfail("minimum"),
xfail("mode"),
xfail("msort"),
xfail("multinomial"),
xfail("mv"),
xfail("max_pool2d_with_indices_backward", ""),
xfail("nanmean"),
xfail("nanmedian"),
xfail("nanquantile"),
xfail("nansum"),
xfail("native_batch_norm"),
xfail("native_dropout_backward"),
xfail("native_layer_norm"),
xfail("narrow_copy"),
xfail("ne"),
xfail("new_empty"),
xfail("new_empty_strided"),
xfail("transpose"),
xfail("nn.functional.adaptive_avg_pool1d"),
xfail("nn.functional.adaptive_avg_pool2d"),
xfail("nn.functional.adaptive_avg_pool3d"),
xfail("nn.functional.adaptive_max_pool1d"),
xfail("nn.functional.adaptive_max_pool2d"),
xfail("nn.functional.adaptive_max_pool3d"),
xfail("nn.functional.alpha_dropout"),
xfail("nn.functional.avg_pool1d"),
xfail("nn.functional.avg_pool2d"),
xfail("nn.functional.avg_pool3d"),
xfail("nn.functional.batch_norm"),
xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.binary_cross_entropy"),
xfail("nn.functional.binary_cross_entropy_with_logits"),
xfail("nn.functional.celu"),
xfail("nn.functional.conv1d"),
xfail("nn.functional.conv2d"),
xfail("nn.functional.conv_transpose1d"),
xfail("nn.functional.conv_transpose2d"),
xfail("nn.functional.conv_transpose3d"),
xfail("nn.functional.cosine_similarity"),
xfail("nn.functional.cross_entropy"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.dropout"),
xfail("nn.functional.dropout2d"),
xfail("nn.functional.dropout3d"),
xfail("nn.functional.elu"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.gaussian_nll_loss"),
xfail("nn.functional.glu"),
xfail("nn.functional.grid_sample"),
xfail("nn.functional.group_norm"),
xfail("nn.functional.hardshrink"),
xfail("nn.functional.hardsigmoid"),
xfail("nn.functional.hardswish"),
xfail("nn.functional.hardtanh"),
xfail("nn.functional.huber_loss"),
xfail("nn.functional.instance_norm"),
xfail("nn.functional.interpolate", "area"),
xfail("nn.functional.interpolate", "bicubic"),
xfail("nn.functional.interpolate", "bilinear"),
xfail("nn.functional.interpolate", "linear"),
xfail("nn.functional.interpolate", "nearest"),
xfail("nn.functional.interpolate", "trilinear"),
xfail("nn.functional.layer_norm"),
xfail("nn.functional.leaky_relu"),
xfail("nn.functional.linear"),
xfail("nn.functional.local_response_norm"),
xfail("nn.functional.logsigmoid"),
xfail("nn.functional.margin_ranking_loss"),
xfail("nn.functional.max_pool1d"),
xfail("nn.functional.max_pool2d"),
xfail("nn.functional.max_pool3d"),
xfail("nn.functional.max_unpool1d"),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.max_unpool2d"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.max_unpool3d"),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.mish"),
xfail("nn.functional.mse_loss"),
xfail("nn.functional.multi_margin_loss"),
xfail("nn.functional.multilabel_margin_loss"),
xfail("nn.functional.multilabel_soft_margin_loss"),
xfail("nn.functional.nll_loss"),
xfail("nn.functional.normalize"),
xfail("nn.functional.pad", "circular"),
xfail("nn.functional.pad", "constant"),
xfail("nn.functional.pad", "reflect"),
xfail("nn.functional.pad", "replicate"),
xfail("nn.functional.pairwise_distance"),
xfail("nn.functional.pdist"),
xfail("nn.functional.pixel_shuffle"),
xfail("nn.functional.pixel_unshuffle"),
xfail("nn.functional.poisson_nll_loss"),
xfail("nn.functional.prelu"),
xfail("nn.functional.relu6"),
xfail("nn.functional.rrelu"),
xfail("nn.functional.selu"),
xfail("nn.functional.silu"),
xfail("nn.functional.smooth_l1_loss"),
xfail("nn.functional.soft_margin_loss"),
xfail("nn.functional.softplus"),
xfail("nn.functional.softshrink"),
xfail("nn.functional.threshold"),
xfail("nn.functional.triplet_margin_loss"),
xfail("nn.functional.triplet_margin_with_distance_loss"),
xfail("nn.functional.unfold"),
xfail("nn.functional.upsample_bilinear"),
xfail("nn.functional.upsample_nearest"),
xfail("nonzero"),
xfail("norm"),
xfail("norm", "fro"),
xfail("norm", "inf"),
xfail("norm", "nuc"),
xfail("normal"),
xfail("normal", "number_mean"),
xfail("ormqr"),
xfail("ones"),
xfail("pca_lowrank"),
xfail("pinverse"),
xfail("polar"),
xfail("put"),
xfail("qr"),
xfail("quantile"),
xfail("rand_like"),
xfail("randint_like"),
xfail("randint"),
xfail("randn"),
xfail("randn_like"),
xfail("renorm"),
xfail("repeat_interleave"),
xfail("resize_"),
xfail("resize_as_"),
xfail("roll"),
xfail("rot90"),
xfail("rsub"),
xfail("scalar_tensor"),
xfail("scatter_add"),
xfail("scatter"),
xfail("scatter_reduce", "amax"),
xfail("scatter_reduce", "amin"),
xfail("scatter_reduce", "mean"),
xfail("scatter_reduce", "prod"),
xfail("scatter_reduce", "sum"),
xfail("searchsorted"),
xfail("select"),
xfail("select_scatter"),
xfail("sort"),
xfail("sparse.sampled_addmm"),
xfail("sparse.mm", "reduce"),
xfail("special.airy_ai"),
xfail("special.bessel_j0"),
xfail("special.bessel_j1"),
xfail("special.bessel_y0"),
xfail("special.bessel_y1"),
xfail("special.chebyshev_polynomial_t"),
xfail("special.chebyshev_polynomial_u"),
xfail("special.entr"),
xfail("special.erfcx"),
xfail("special.hermite_polynomial_h"),
xfail("special.hermite_polynomial_he"),
xfail("special.i0e"),
xfail("special.i1"),
xfail("special.i1e"),
xfail("special.laguerre_polynomial_l"),
xfail("special.log_ndtr"),
xfail("special.modified_bessel_i0"),
xfail("special.modified_bessel_i1"),
xfail("special.modified_bessel_k0"),
xfail("special.modified_bessel_k1"),
xfail("special.ndtri"),
xfail("special.scaled_modified_bessel_k0"),
xfail("special.scaled_modified_bessel_k1"),
xfail("special.spherical_bessel_j0"),
xfail("special.xlog1py"),
xfail("special.zeta"),
xfail("squeeze", "multiple"),
xfail("signal.windows.bartlett"),
xfail("signal.windows.blackman"),
xfail("signal.windows.cosine"),
xfail("signal.windows.exponential"),
xfail("signal.windows.gaussian"),
xfail("signal.windows.general_cosine"),
xfail("signal.windows.general_hamming"),
xfail("signal.windows.hamming"),
xfail("signal.windows.hann"),
xfail("signal.windows.nuttall"),
xfail("signal.windows.kaiser"),
xfail("stack"),
xfail("std"),
xfail("std", "unbiased"),
xfail("std_mean"),
xfail("std_mean", "unbiased"),
xfail("stft"),
xfail("svd"),
xfail("svd_lowrank"),
xfail("t"),
xfail("take_along_dim"),
xfail("take"),
xfail("tensor_split"),
xfail("to_sparse"),
xfail("topk"),
xfail("trace"),
xfail("trapezoid"),
xfail("trapz"),
xfail("triangular_solve"),
xfail("tril"),
xfail("triu"),
xfail("unbind"),
xfail("unfold"),
xfail("unfold_copy"),
xfail("uniform"),
xfail("unflatten"),
xfail("unique_consecutive"),
xfail("unique"),
xfail("unsafe_split"),
xfail("var_mean"),
xfail("var_mean", "unbiased"),
xfail("vdot"),
xfail("view_copy"),
xfail("view_as_complex"),
xfail("where"),
xfail("zeros"),
# ops inside this might even fail without dtensor
# tests, as we rescale op db common test size factor (i.e. L, M, S)
# which triggered the orignal function run failures with input
# generation becomes wrong, we skip them for now but should enable later.
# TODO: need to clean this list and remove all cases
skip("argwhere"),
skip("cumprod"),
skip("__rmatmul__"),
skip("meshgrid", "list_of_tensors"),
skip("meshgrid", "variadic_tensors"),
skip("nn.functional.scaled_dot_product_attention"),
skip("nn.functional.softmin"),
skip("nn.functional.embedding"),
skip("nn.functional.embedding_bag"),
skip("nn.functional.feature_alpha_dropout", "with_train"),
skip("nn.functional.feature_alpha_dropout", "without_train"),
skip("nn.functional.hinge_embedding_loss"),
skip("nn.functional.cosine_embedding_loss"),
skip("fft.hfft"),
skip("fft.hfft2"),
skip("fft.hfft2"),
skip("fft.hfftn"),
skip("fft.ifftn"),
skip("fft.irfft"),
skip("istft"),
skip("isclose"),
skip("isreal"),
skip("matmul"),
skip("masked.mean"),
skip("masked.var"),
skip("masked.std"),
skip("masked.normalize"),
skip("prod"),
skip("_segment_reduce", "lengths"),
skip("_segment_reduce", "offsets"),
# TODO: fix the following ops
skip("squeeze"),
}
skip_bw = [
None, # corresponds to the transpose ops 'H' and 'T'
"torch.bucketize",
"torch.conj_physical",
"torch.eq",
"torch.isfinite",
"torch.isnan",
]
OP_DB_WORLD_SIZE = 4
DEVICE_TYPE = "cpu"
class TestDTensorOps(DTensorOpTestBase):
|
import unittest
import warnings
import torch
import torch.distributed as dist
import torch.testing._internal.common_methods_invocations as common_ops
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.overrides import resolve_name
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import DecorateInfo, op_db
from torch.testing._internal.common_utils import (
run_tests,
suppress_warnings,
TEST_WITH_ASAN,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorOpTestBase,
)
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map
common_ops.L = 24
common_ops.M = 12
common_ops.S = 4
common_ops.XS = 2
dtensor_fails = {
# these sometimes pass and sometimes fail
# we need to remove many of them from list once op
# get full support with varying sharding specs
xfail("__getitem__"),
xfail("__rsub__"),
xfail("_chunk_cat"),
xfail("_native_batch_norm_legit"),
xfail("_upsample_bilinear2d_aa"),
xfail("addbmm"),
xfail("addmv"),
xfail("addr"),
xfail("all"),
xfail("allclose"),
xfail("alias_copy"),
xfail("amax"),
xfail("amin"),
xfail("aminmax"),
xfail("any"),
xfail("arange"),
xfail("argmax"),
xfail("argmin"),
xfail("argsort"),
xfail("as_strided"),
xfail("as_strided", "partial_views"),
xfail("as_strided_copy"),
xfail("as_strided_scatter"),
xfail("bernoulli"),
xfail("_batch_norm_with_update"),
xfail("block_diag"),
xfail("broadcast_shapes"),
xfail("cauchy"),
xfail("cdist"),
xfail("cholesky"),
xfail("cholesky_inverse"),
xfail("cholesky_solve"),
xfail("chunk"),
xfail("clamp"),
xfail("clamp_max"),
xfail("clamp_min"),
xfail("combinations"),
xfail("complex"),
xfail("constant_pad_nd"),
xfail("count_nonzero"),
xfail("cross"),
xfail("cummax"),
xfail("cummin"),
xfail("cumsum"),
xfail("cumulative_trapezoid"),
xfail("diagonal_scatter"),
xfail("dist"),
xfail("dot"),
xfail("empty"),
xfail("empty_strided"),
xfail("empty_like"),
xfail("empty_permuted"),
xfail("expand_copy"),
xfail("exponential"),
xfail("equal"),
xfail("eye"),
xfail("fft.fft2"),
xfail("fft.fft"),
xfail("fft.fftn"),
xfail("fft.fftshift"),
xfail("fft.ifft2"),
xfail("fft.ifft"),
xfail("fft.ifftshift"),
xfail("fft.ihfft2"),
xfail("fft.ihfft"),
xfail("fft.ihfftn"),
xfail("fft.irfft2"),
xfail("fft.irfftn"),
xfail("fft.rfft2"),
xfail("fft.rfft"),
xfail("fft.rfftn"),
xfail("fill"),
xfail("flip"),
xfail("fliplr"),
xfail("flipud"),
xfail("floor_divide"),
xfail("fmax"),
xfail("fmin"),
xfail("frexp"),
xfail("full"),
xfail("full_like"),
xfail("gather"),
xfail("geometric"),
xfail("geqrf"),
xfail("grid_sampler_2d"),
xfail("gradient"),
xfail("heaviside"),
xfail("histc"),
xfail("histogram"),
xfail("histogramdd"),
xfail("index_add"),
xfail("index_copy"),
xfail("index_fill"),
xfail("index_put"),
xfail("index_reduce", "prod"),
xfail("index_reduce", "mean"),
xfail("index_reduce", "amax"),
xfail("index_reduce", "amin"),
xfail("index_select"),
xfail("isin"),
xfail("kthvalue"),
xfail("linalg.cholesky"),
xfail("linalg.cholesky_ex"),
xfail("linalg.cross"),
xfail("linalg.det"),
xfail("linalg.det", "singular"),
xfail("linalg.eig"),
xfail("linalg.eigvals"),
xfail("linalg.householder_product"),
xfail("linalg.inv"),
xfail("linalg.inv_ex"),
xfail("linalg.ldl_factor"),
xfail("linalg.ldl_factor_ex"),
xfail("linalg.ldl_solve"),
xfail("linalg.lstsq"),
xfail("linalg.lstsq", "grad_oriented"),
xfail("linalg.lu"),
xfail("linalg.lu_factor"),
xfail("linalg.lu_factor_ex"),
xfail("linalg.lu_solve"),
xfail("linalg.matrix_norm"),
xfail("linalg.matrix_power"),
xfail("linalg.matrix_rank"),
xfail("linalg.matrix_rank", "hermitian"),
xfail("linalg.multi_dot"),
xfail("linalg.norm"),
xfail("linalg.norm", "subgradients_at_zero"),
xfail("linalg.pinv"),
xfail("linalg.pinv", "hermitian"),
xfail("linalg.slogdet"),
xfail("linalg.solve"),
xfail("linalg.solve_ex"),
xfail("linalg.solve_triangular"),
xfail("linalg.tensorinv"),
xfail("linalg.tensorsolve"),
xfail("linalg.vander"),
xfail("linalg.vecdot"),
xfail("linspace"),
xfail("linspace", "tensor_overload"),
xfail("log_normal"),
xfail("logcumsumexp"),
xfail("logdet"),
xfail("logspace"),
xfail("logspace", "tensor_overload"),
xfail("logsumexp"),
xfail("lu"),
xfail("lu_solve"),
xfail("lu_unpack"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("masked.amax"),
xfail("masked.amin"),
xfail("masked.argmax"),
xfail("masked.argmin"),
xfail("masked.cumprod"),
xfail("masked.cumsum"),
xfail("masked.logsumexp"),
xfail("masked.median"),
xfail("matrix_exp"),
xfail("max", "binary"),
xfail("max", "reduction_with_dim"),
xfail("maximum"),
xfail("median"),
xfail("min", "binary"),
xfail("min", "reduction_with_dim"),
xfail("minimum"),
xfail("mode"),
xfail("msort"),
xfail("multinomial"),
xfail("mv"),
xfail("max_pool2d_with_indices_backward", ""),
xfail("nanmean"),
xfail("nanmedian"),
xfail("nanquantile"),
xfail("nansum"),
xfail("native_batch_norm"),
xfail("native_dropout_backward"),
xfail("narrow_copy"),
xfail("ne"),
xfail("new_empty"),
xfail("new_empty_strided"),
xfail("transpose"),
xfail("nn.functional.adaptive_avg_pool1d"),
xfail("nn.functional.adaptive_avg_pool2d"),
xfail("nn.functional.adaptive_avg_pool3d"),
xfail("nn.functional.adaptive_max_pool1d"),
xfail("nn.functional.adaptive_max_pool2d"),
xfail("nn.functional.adaptive_max_pool3d"),
xfail("nn.functional.alpha_dropout"),
xfail("nn.functional.avg_pool1d"),
xfail("nn.functional.avg_pool2d"),
xfail("nn.functional.avg_pool3d"),
xfail("nn.functional.batch_norm"),
xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.binary_cross_entropy"),
xfail("nn.functional.binary_cross_entropy_with_logits"),
xfail("nn.functional.celu"),
xfail("nn.functional.conv1d"),
xfail("nn.functional.conv2d"),
xfail("nn.functional.conv3d"),
xfail("nn.functional.conv_transpose1d"),
xfail("nn.functional.conv_transpose2d"),
xfail("nn.functional.conv_transpose3d"),
xfail("nn.functional.cosine_similarity"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.dropout"),
xfail("nn.functional.dropout2d"),
xfail("nn.functional.dropout3d"),
xfail("nn.functional.elu"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.glu"),
xfail("nn.functional.grid_sample"),
xfail("nn.functional.group_norm"),
xfail("nn.functional.hardshrink"),
xfail("nn.functional.hardsigmoid"),
xfail("nn.functional.hardswish"),
xfail("nn.functional.hardtanh"),
xfail("nn.functional.huber_loss"),
xfail("nn.functional.instance_norm"),
xfail("nn.functional.interpolate", "area"),
xfail("nn.functional.interpolate", "bicubic"),
xfail("nn.functional.interpolate", "bilinear"),
xfail("nn.functional.interpolate", "linear"),
xfail("nn.functional.interpolate", "nearest"),
xfail("nn.functional.interpolate", "nearest-exact"),
xfail("nn.functional.interpolate", "trilinear"),
xfail("nn.functional.leaky_relu"),
xfail("nn.functional.linear"),
xfail("nn.functional.local_response_norm"),
xfail("nn.functional.logsigmoid"),
xfail("nn.functional.margin_ranking_loss"),
xfail("nn.functional.max_pool1d"),
xfail("nn.functional.max_pool2d"),
xfail("nn.functional.max_pool3d"),
xfail("nn.functional.max_unpool1d"),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.max_unpool2d"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.max_unpool3d"),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.mish"),
xfail("nn.functional.mse_loss"),
xfail("nn.functional.multi_margin_loss"),
xfail("nn.functional.multi_head_attention_forward"),
xfail("nn.functional.multilabel_margin_loss"),
xfail("nn.functional.multilabel_soft_margin_loss"),
xfail("nn.functional.normalize"),
xfail("nn.functional.pad", "constant"),
xfail("nn.functional.pad", "reflect"),
xfail("nn.functional.pad", "replicate"),
xfail("nn.functional.pad", "replicate_negative"),
xfail("nn.functional.pairwise_distance"),
xfail("nn.functional.pdist"),
xfail("nn.functional.pixel_shuffle"),
xfail("nn.functional.pixel_unshuffle"),
xfail("nn.functional.prelu"),
xfail("nn.functional.relu6"),
xfail("nn.functional.rrelu"),
xfail("nn.functional.selu"),
xfail("nn.functional.smooth_l1_loss"),
xfail("nn.functional.soft_margin_loss"),
xfail("nn.functional.softplus"),
xfail("nn.functional.softshrink"),
xfail("nn.functional.threshold"),
xfail("nn.functional.triplet_margin_loss"),
xfail("nn.functional.triplet_margin_with_distance_loss"),
xfail("nn.functional.unfold"),
xfail("nn.functional.upsample_bilinear"),
xfail("nn.functional.upsample_nearest"),
xfail("nonzero"),
xfail("normal"),
xfail("normal", "number_mean"),
xfail("normal", "in_place"),
xfail("ormqr"),
xfail("ones"),
xfail("pca_lowrank"),
xfail("pinverse"),
xfail("polar"),
xfail("put"),
xfail("quantile"),
xfail("rand_like"),
xfail("randint_like"),
xfail("randint"),
xfail("randn"),
xfail("randn_like"),
xfail("renorm"),
xfail("repeat_interleave"),
xfail("resize_"),
xfail("resize_as_"),
xfail("roll"),
xfail("rot90"),
xfail("rsub"),
xfail("scalar_tensor"),
xfail("scatter_add"),
xfail("scatter_reduce", "amax"),
xfail("scatter_reduce", "amin"),
xfail("scatter_reduce", "mean"),
xfail("scatter_reduce", "prod"),
xfail("scatter_reduce", "sum"),
xfail("searchsorted"),
xfail("select"),
xfail("select_scatter"),
xfail("sort"),
xfail("sparse.sampled_addmm"),
xfail("sparse.mm", "reduce"),
xfail("special.airy_ai"),
xfail("special.bessel_j0"),
xfail("special.bessel_j1"),
xfail("special.bessel_y0"),
xfail("special.bessel_y1"),
xfail("special.chebyshev_polynomial_t"),
xfail("special.chebyshev_polynomial_u"),
xfail("special.entr"),
xfail("special.erfcx"),
xfail("special.hermite_polynomial_h"),
xfail("special.hermite_polynomial_he"),
xfail("special.i0e"),
xfail("special.i1"),
xfail("special.i1e"),
xfail("special.laguerre_polynomial_l"),
xfail("special.log_ndtr"),
xfail("special.modified_bessel_i0"),
xfail("special.modified_bessel_i1"),
xfail("special.modified_bessel_k0"),
xfail("special.modified_bessel_k1"),
xfail("special.ndtri"),
xfail("special.scaled_modified_bessel_k0"),
xfail("special.scaled_modified_bessel_k1"),
xfail("special.spherical_bessel_j0"),
xfail("special.xlog1py"),
xfail("special.zeta"),
xfail("squeeze", "multiple"),
xfail("signal.windows.bartlett"),
xfail("signal.windows.blackman"),
xfail("signal.windows.cosine"),
xfail("signal.windows.exponential"),
xfail("signal.windows.gaussian"),
xfail("signal.windows.general_cosine"),
xfail("signal.windows.general_hamming"),
xfail("signal.windows.hamming"),
xfail("signal.windows.hann"),
xfail("signal.windows.nuttall"),
xfail("signal.windows.kaiser"),
xfail("stack"),
xfail("std"),
xfail("std", "unbiased"),
xfail("std_mean"),
xfail("std_mean", "unbiased"),
xfail("stft"),
xfail("svd_lowrank"),
xfail("t_copy"),
xfail("take"),
xfail("tensor_split"),
xfail("to_sparse"),
xfail("trace"),
xfail("trapezoid"),
xfail("trapz"),
xfail("triangular_solve"),
xfail("unbind"),
xfail("unfold"),
xfail("unfold_copy"),
xfail("uniform"),
xfail("unflatten"),
xfail("unique_consecutive"),
xfail("unique"),
xfail("unsafe_split"),
xfail("unsafe_chunk"),
xfail("_unsafe_masked_index"),
xfail("_unsafe_masked_index_put_accumulate"),
xfail("var_mean"),
xfail("var_mean", "unbiased"),
xfail("vdot"),
xfail("view_copy"),
xfail("zeros"),
# ops inside this might even fail without dtensor
# tests, as we rescale op db common test size factor (i.e. L, M, S)
# which triggered the original function run failures with input
# generation becomes wrong, we skip them for now but should enable later.
# TODO: need to clean this list and remove all cases
skip("argwhere"),
skip("cumprod"),
skip("__rmatmul__"),
skip("meshgrid", "list_of_tensors"),
skip("meshgrid", "variadic_tensors"),
skip("nn.functional.scaled_dot_product_attention"),
skip("nn.functional.softmin"),
skip("nn.functional.embedding"),
skip("nn.functional.embedding_bag"),
skip("nn.functional.feature_alpha_dropout", "with_train"),
skip("nn.functional.feature_alpha_dropout", "without_train"),
skip("nn.functional.hinge_embedding_loss"),
skip("nn.functional.cosine_embedding_loss"),
skip("fft.hfft"),
skip("fft.hfft2"),
skip("fft.hfft2"),
skip("fft.hfftn"),
skip("fft.ifftn"),
skip("fft.irfft"),
skip("istft"),
skip("isclose"),
skip("isreal"),
skip("matmul"),
skip("masked.mean"),
skip("masked.var"),
skip("masked.std"),
skip("masked.normalize"),
skip("prod"),
skip("_segment_reduce", "lengths"),
skip("_segment_reduce", "offsets"),
# TODO: fix the following ops
skip("squeeze"),
}
skip_bw = [
None, # corresponds to the transpose ops 'H' and 'T'
"torch.bucketize",
"torch.conj_physical",
"torch.eq",
"torch.isfinite",
"torch.isnan",
]
OP_DB_WORLD_SIZE = 4
DEVICE_TYPE = "cpu"
class TestDTensorOps(DTensorOpTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_tensor/test_dtensor_ops.py
|
run_dtensor_crossref
|
def run_dtensor_crossref(self, func, args, kwargs):
to_dtensor = DTensorConverter(self.mesh, args, kwargs)
def concat_res_if_necessary(func, res: object) -> object:
# concat the result on corresponding dim for ops like
# split, so that we can call backward on a single tensor
if (
(resolve_name(func) is not None)
and ("split" in resolve_name(func))
):
dim = args[2] if len(args) == 3 else 0
return torch.cat(res, dim=dim)
else:
return res
# TODO: also handle cases where func raise an exception
rs = func(*args, **kwargs)
rs = concat_res_if_necessary(func, rs)
def to_replicate(e: object) -> object:
return (
e.redistribute(self.mesh, self.mesh.ndim * [Replicate()])
if isinstance(e, DTensor)
else e
)
try:
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# for every comb of sharding choices, we test if it works
for dtensor_args, dtensor_kwargs in to_dtensor:
# Only attempt if we managed to convert all tensors to DTensor
# (if any of them failed, we're in a mixed tensor situation and
# this is not allowed in DTensor)
if to_dtensor.successful():
# Handle special cases first if there's any
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
dtensor_rs = func(*dtensor_args, **dtensor_kwargs)
# we need to skip tests containing tensors of zero elmeents for now.
# see issue: https://github.com/pytorch/tau/issues/470
# TODO remove this once issue above fixed.
flat_args, _ = tree_flatten(dtensor_rs)
if any(
isinstance(e, torch.Tensor) and e.numel() == 0
for e in flat_args
):
continue
# redistribute/all_gather the results to compare with normal output
dtensor_rs = tree_map(to_replicate, dtensor_rs)
dtensor_rs = concat_res_if_necessary(func, dtensor_rs)
try:
if resolve_name(func) not in skip_bw:
if isinstance(dtensor_rs, DTensor):
dtensor_rs.to_local().sum().backward()
elif isinstance(dtensor_rs, tuple):
dtensor_rs[0].to_local().sum().backward()
except Exception as e:
# TODO(anj): Remove this guard exception after gaining more confidence.
if torch.distributed.get_rank() == 0:
print(
f"failed to run BW: {resolve_name(func)}, {func}, {str(e)})"
)
self.assert_ref_dtensor_equal(dtensor_rs, rs)
else:
raise RuntimeError(
f"failed to convert args to DTensor; "
f"originally (*{args}, **{kwargs})"
)
except Exception as e:
raise RuntimeError(
f"failed to run: {resolve_name(func)}, with (*{args}, **{kwargs})"
) from e
return rs
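A quick plain-torch illustration of the concat_res_if_necessary idea above: ops like split return a tuple of tensors, so the pieces are concatenated back along the same dim before calling .sum().backward() on a single tensor. This sketch assumes nothing beyond stock torch and is not part of the test file.
import torch
x = torch.randn(6, 3, requires_grad=True)
pieces = torch.split(x, 2, dim=0)          # tuple of three (2, 3) views
torch.cat(pieces, dim=0).sum().backward()  # backward through one concatenated tensor
print(x.grad.shape)                        # torch.Size([6, 3])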
|
def run_dtensor_crossref(self, func, args, kwargs):
to_dtensor = DTensorConverter(self.mesh, args, kwargs)
def concat_res_if_necessary(func, res: object) -> object:
# concat the result on corresponding dim for ops like
# split, so that we can call backward on a single tensor
if (resolve_name(func) is not None) and ("split" in resolve_name(func)):
dim = args[2] if len(args) == 3 else 0
return torch.cat(res, dim=dim)
else:
return res
# TODO: also handle cases where func raises an exception
rs = func(*args, **kwargs)
rs = concat_res_if_necessary(func, rs)
def to_replicate(e: object) -> object:
return e.full_tensor() if isinstance(e, DTensor) else e
try:
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# for every comb of sharding choices, we test if it works
for dtensor_args, dtensor_kwargs in to_dtensor:
# Only attempt if we managed to convert all tensors to DTensor
# (if any of them failed, we're in a mixed tensor situation and
# this is not allowed in DTensor)
if to_dtensor.successful():
# Handle special cases first if there's any
# Suppress warnings, this doesn't matter for test_meta.py
# but it does matter if you want to use this decorator
# for cross-ref testing, as some tests may be looking at
# errors
dtensor_rs = func(*dtensor_args, **dtensor_kwargs)
# we need to skip tests containing tensors of zero elements for now.
# see issue: https://github.com/pytorch/tau/issues/470
# TODO remove this once issue above fixed.
flat_args = pytree.tree_leaves(dtensor_rs)
if any(
isinstance(e, torch.Tensor) and e.numel() == 0
for e in flat_args
):
continue
# redistribute/all_gather the results to compare with normal output
dtensor_rs = tree_map(to_replicate, dtensor_rs)
dtensor_rs = concat_res_if_necessary(func, dtensor_rs)
try:
if resolve_name(func) not in skip_bw:
if isinstance(dtensor_rs, DTensor):
dtensor_rs.to_local().sum().backward()
elif isinstance(dtensor_rs, tuple):
dtensor_rs[0].to_local().sum().backward()
except Exception as e:
# TODO(anj): Remove this guard exception after gaining more confidence.
if torch.distributed.get_rank() == 0:
print(
f"failed to run BW: {resolve_name(func)}, {func}, {str(e)})"
)
self.assert_ref_dtensor_equal(dtensor_rs, rs)
else:
raise RuntimeError(
f"failed to convert args to DTensor; "
f"originally (*{args}, **{kwargs})"
)
except Exception as e:
raise RuntimeError(
f"failed to run: {resolve_name(func)}, with (*{args}, **{kwargs})"
) from e
return rs
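The rewrite above swaps tree_flatten for pytree.tree_leaves and replaces the manual redistribute-to-Replicate helper with DTensor.full_tensor(). Below is a minimal single-process sketch (plain torch, no device mesh; an illustration only) showing that tree_leaves yields the same leaves tree_flatten does, which is why the zero-element check is unchanged.
import torch
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_flatten
res = (torch.zeros(0), [torch.ones(2), {"x": torch.arange(3)}])
flat_old, _spec = tree_flatten(res)   # older style: leaves plus a tree spec
flat_new = pytree.tree_leaves(res)    # newer style: leaves only
assert all(a.equal(b) for a, b in zip(flat_old, flat_new))
print(any(isinstance(e, torch.Tensor) and e.numel() == 0 for e in flat_new))  # True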
|
import unittest
import warnings
import torch
import torch.distributed as dist
import torch.testing._internal.common_methods_invocations as common_ops
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate
from torch.overrides import resolve_name
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import (
DecorateInfo,
op_db,
)
from torch.testing._internal.common_utils import (
run_tests,
suppress_warnings,
TEST_WITH_ASAN,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorOpTestBase,
)
from torch.utils._pytree import tree_flatten, tree_map
common_ops.L = 24
common_ops.M = 12
common_ops.S = 4
common_ops.XS = 2
dtensor_fails = {
# these sometimes pass and sometimes fail;
# we need to remove many of them from the list once ops
# get full support with varying sharding specs
xfail("__getitem__"),
xfail("__rsub__"),
xfail("_native_batch_norm_legit"),
xfail("_softmax_backward_data"),
xfail("addbmm"),
xfail("addmv"),
xfail("addr"),
xfail("all"),
xfail("allclose"),
xfail("amax"),
xfail("amin"),
xfail("aminmax"),
xfail("any"),
xfail("arange"),
xfail("argmax"),
xfail("argmin"),
xfail("argsort"),
xfail("as_strided"),
xfail("as_strided", "partial_views"),
xfail("as_strided_scatter"),
xfail("baddbmm"),
xfail("bernoulli"),
xfail("block_diag"),
xfail("broadcast_shapes"),
xfail("cauchy"),
xfail("cartesian_prod"),
xfail("cdist"),
xfail("cholesky"),
xfail("cholesky_inverse"),
xfail("cholesky_solve"),
xfail("chunk"),
xfail("clamp"),
xfail("clamp_max"),
xfail("clamp_min"),
xfail("combinations"),
xfail("complex"),
xfail("constant_pad_nd"),
xfail("corrcoef"),
xfail("count_nonzero"),
xfail("cov"),
xfail("cross"),
xfail("cummax"),
xfail("cummin"),
xfail("cumsum"),
xfail("cumulative_trapezoid"),
xfail("diag"),
xfail("diag_embed"),
xfail("diagflat"),
xfail("diagonal"),
xfail("diagonal_copy"),
xfail("diagonal_scatter"),
xfail("dist"),
xfail("dot"),
xfail("einsum"),
xfail("empty"),
xfail("empty_like"),
xfail("exponential"),
xfail("eye"),
xfail("fft.fft2"),
xfail("fft.fft"),
xfail("fft.fftn"),
xfail("fft.fftshift"),
xfail("fft.ifft2"),
xfail("fft.ifft"),
xfail("fft.ifftshift"),
xfail("fft.ihfft2"),
xfail("fft.ihfft"),
xfail("fft.ihfftn"),
xfail("fft.irfft2"),
xfail("fft.irfftn"),
xfail("fft.rfft2"),
xfail("fft.rfft"),
xfail("fft.rfftn"),
xfail("fill"),
xfail("flip"),
xfail("fliplr"),
xfail("flipud"),
xfail("floor_divide"),
xfail("fmax"),
xfail("fmin"),
xfail("frexp"),
xfail("full"),
xfail("full_like"),
xfail("gather"),
xfail("geometric"),
xfail("geqrf"),
xfail("grid_sampler_2d"),
xfail("gradient"),
xfail("heaviside"),
xfail("histc"),
xfail("histogram"),
xfail("histogramdd"),
xfail("index_add"),
xfail("index_copy"),
xfail("index_fill"),
xfail("index_put"),
xfail("index_reduce"),
xfail("index_select"),
xfail("isin"),
xfail("isinf"),
xfail("isneginf"),
xfail("isposinf"),
xfail("kthvalue"),
xfail("linalg.cholesky"),
xfail("linalg.cholesky_ex"),
xfail("linalg.cond"),
xfail("linalg.cross"),
xfail("linalg.det"),
xfail("linalg.det", "singular"),
xfail("linalg.eig"),
xfail("linalg.eigh"),
xfail("linalg.eigvals"),
xfail("linalg.eigvalsh"),
xfail("linalg.householder_product"),
xfail("linalg.inv"),
xfail("linalg.inv_ex"),
xfail("linalg.ldl_factor"),
xfail("linalg.ldl_factor_ex"),
xfail("linalg.ldl_solve"),
xfail("linalg.lstsq"),
xfail("linalg.lstsq", "grad_oriented"),
xfail("linalg.lu"),
xfail("linalg.lu_factor"),
xfail("linalg.lu_factor_ex"),
xfail("linalg.lu_solve"),
xfail("linalg.matrix_norm"),
xfail("linalg.matrix_power"),
xfail("linalg.matrix_rank"),
xfail("linalg.matrix_rank", "hermitian"),
xfail("linalg.multi_dot"),
xfail("linalg.norm"),
xfail("linalg.norm", "subgradients_at_zero"),
xfail("linalg.pinv"),
xfail("linalg.pinv", "hermitian"),
xfail("linalg.qr"),
xfail("linalg.slogdet"),
xfail("linalg.solve"),
xfail("linalg.solve_ex"),
xfail("linalg.solve_triangular"),
xfail("linalg.svd"),
xfail("linalg.svdvals"),
xfail("linalg.tensorinv"),
xfail("linalg.tensorsolve"),
xfail("linalg.vander"),
xfail("linalg.vecdot"),
xfail("linalg.vector_norm"),
xfail("linspace"),
xfail("log_normal"),
xfail("log_softmax"),
xfail("log_softmax", "with_dtype"),
xfail("logcumsumexp"),
xfail("logdet"),
xfail("logspace"),
xfail("logsumexp"),
xfail("lt"),
xfail("lu"),
xfail("lu_solve"),
xfail("lu_unpack"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("masked.amax"),
xfail("masked.amin"),
xfail("masked.argmax"),
xfail("masked.argmin"),
xfail("masked.cumprod"),
xfail("masked.cumsum"),
xfail("masked.log_softmax"),
xfail("masked.logaddexp"),
xfail("masked.logsumexp"),
xfail("masked.median"),
xfail("masked.norm"),
xfail("masked.prod"),
xfail("masked.softmin"),
xfail("masked.softmax"),
xfail("masked.sum"),
xfail("matrix_exp"),
xfail("max", "binary"),
xfail("max", "reduction_no_dim"),
xfail("max", "reduction_with_dim"),
xfail("maximum"),
xfail("median"),
xfail("min", "binary"),
xfail("min", "reduction_no_dim"),
xfail("min", "reduction_with_dim"),
xfail("minimum"),
xfail("mode"),
xfail("msort"),
xfail("multinomial"),
xfail("mv"),
xfail("max_pool2d_with_indices_backward", ""),
xfail("nanmean"),
xfail("nanmedian"),
xfail("nanquantile"),
xfail("nansum"),
xfail("native_batch_norm"),
xfail("native_dropout_backward"),
xfail("native_layer_norm"),
xfail("narrow_copy"),
xfail("ne"),
xfail("new_empty"),
xfail("new_empty_strided"),
xfail("transpose"),
xfail("nn.functional.adaptive_avg_pool1d"),
xfail("nn.functional.adaptive_avg_pool2d"),
xfail("nn.functional.adaptive_avg_pool3d"),
xfail("nn.functional.adaptive_max_pool1d"),
xfail("nn.functional.adaptive_max_pool2d"),
xfail("nn.functional.adaptive_max_pool3d"),
xfail("nn.functional.alpha_dropout"),
xfail("nn.functional.avg_pool1d"),
xfail("nn.functional.avg_pool2d"),
xfail("nn.functional.avg_pool3d"),
xfail("nn.functional.batch_norm"),
xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.binary_cross_entropy"),
xfail("nn.functional.binary_cross_entropy_with_logits"),
xfail("nn.functional.celu"),
xfail("nn.functional.conv1d"),
xfail("nn.functional.conv2d"),
xfail("nn.functional.conv_transpose1d"),
xfail("nn.functional.conv_transpose2d"),
xfail("nn.functional.conv_transpose3d"),
xfail("nn.functional.cosine_similarity"),
xfail("nn.functional.cross_entropy"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.dropout"),
xfail("nn.functional.dropout2d"),
xfail("nn.functional.dropout3d"),
xfail("nn.functional.elu"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.gaussian_nll_loss"),
xfail("nn.functional.glu"),
xfail("nn.functional.grid_sample"),
xfail("nn.functional.group_norm"),
xfail("nn.functional.hardshrink"),
xfail("nn.functional.hardsigmoid"),
xfail("nn.functional.hardswish"),
xfail("nn.functional.hardtanh"),
xfail("nn.functional.huber_loss"),
xfail("nn.functional.instance_norm"),
xfail("nn.functional.interpolate", "area"),
xfail("nn.functional.interpolate", "bicubic"),
xfail("nn.functional.interpolate", "bilinear"),
xfail("nn.functional.interpolate", "linear"),
xfail("nn.functional.interpolate", "nearest"),
xfail("nn.functional.interpolate", "trilinear"),
xfail("nn.functional.layer_norm"),
xfail("nn.functional.leaky_relu"),
xfail("nn.functional.linear"),
xfail("nn.functional.local_response_norm"),
xfail("nn.functional.logsigmoid"),
xfail("nn.functional.margin_ranking_loss"),
xfail("nn.functional.max_pool1d"),
xfail("nn.functional.max_pool2d"),
xfail("nn.functional.max_pool3d"),
xfail("nn.functional.max_unpool1d"),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.max_unpool2d"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.max_unpool3d"),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.mish"),
xfail("nn.functional.mse_loss"),
xfail("nn.functional.multi_margin_loss"),
xfail("nn.functional.multilabel_margin_loss"),
xfail("nn.functional.multilabel_soft_margin_loss"),
xfail("nn.functional.nll_loss"),
xfail("nn.functional.normalize"),
xfail("nn.functional.pad", "circular"),
xfail("nn.functional.pad", "constant"),
xfail("nn.functional.pad", "reflect"),
xfail("nn.functional.pad", "replicate"),
xfail("nn.functional.pairwise_distance"),
xfail("nn.functional.pdist"),
xfail("nn.functional.pixel_shuffle"),
xfail("nn.functional.pixel_unshuffle"),
xfail("nn.functional.poisson_nll_loss"),
xfail("nn.functional.prelu"),
xfail("nn.functional.relu6"),
xfail("nn.functional.rrelu"),
xfail("nn.functional.selu"),
xfail("nn.functional.silu"),
xfail("nn.functional.smooth_l1_loss"),
xfail("nn.functional.soft_margin_loss"),
xfail("nn.functional.softplus"),
xfail("nn.functional.softshrink"),
xfail("nn.functional.threshold"),
xfail("nn.functional.triplet_margin_loss"),
xfail("nn.functional.triplet_margin_with_distance_loss"),
xfail("nn.functional.unfold"),
xfail("nn.functional.upsample_bilinear"),
xfail("nn.functional.upsample_nearest"),
xfail("nonzero"),
xfail("norm"),
xfail("norm", "fro"),
xfail("norm", "inf"),
xfail("norm", "nuc"),
xfail("normal"),
xfail("normal", "number_mean"),
xfail("ormqr"),
xfail("ones"),
xfail("pca_lowrank"),
xfail("pinverse"),
xfail("polar"),
xfail("put"),
xfail("qr"),
xfail("quantile"),
xfail("rand_like"),
xfail("randint_like"),
xfail("randint"),
xfail("randn"),
xfail("randn_like"),
xfail("renorm"),
xfail("repeat_interleave"),
xfail("resize_"),
xfail("resize_as_"),
xfail("roll"),
xfail("rot90"),
xfail("rsub"),
xfail("scalar_tensor"),
xfail("scatter_add"),
xfail("scatter"),
xfail("scatter_reduce", "amax"),
xfail("scatter_reduce", "amin"),
xfail("scatter_reduce", "mean"),
xfail("scatter_reduce", "prod"),
xfail("scatter_reduce", "sum"),
xfail("searchsorted"),
xfail("select"),
xfail("select_scatter"),
xfail("sort"),
xfail("sparse.sampled_addmm"),
xfail("sparse.mm", "reduce"),
xfail("special.airy_ai"),
xfail("special.bessel_j0"),
xfail("special.bessel_j1"),
xfail("special.bessel_y0"),
xfail("special.bessel_y1"),
xfail("special.chebyshev_polynomial_t"),
xfail("special.chebyshev_polynomial_u"),
xfail("special.entr"),
xfail("special.erfcx"),
xfail("special.hermite_polynomial_h"),
xfail("special.hermite_polynomial_he"),
xfail("special.i0e"),
xfail("special.i1"),
xfail("special.i1e"),
xfail("special.laguerre_polynomial_l"),
xfail("special.log_ndtr"),
xfail("special.modified_bessel_i0"),
xfail("special.modified_bessel_i1"),
xfail("special.modified_bessel_k0"),
xfail("special.modified_bessel_k1"),
xfail("special.ndtri"),
xfail("special.scaled_modified_bessel_k0"),
xfail("special.scaled_modified_bessel_k1"),
xfail("special.spherical_bessel_j0"),
xfail("special.xlog1py"),
xfail("special.zeta"),
xfail("squeeze", "multiple"),
xfail("signal.windows.bartlett"),
xfail("signal.windows.blackman"),
xfail("signal.windows.cosine"),
xfail("signal.windows.exponential"),
xfail("signal.windows.gaussian"),
xfail("signal.windows.general_cosine"),
xfail("signal.windows.general_hamming"),
xfail("signal.windows.hamming"),
xfail("signal.windows.hann"),
xfail("signal.windows.nuttall"),
xfail("signal.windows.kaiser"),
xfail("stack"),
xfail("std"),
xfail("std", "unbiased"),
xfail("std_mean"),
xfail("std_mean", "unbiased"),
xfail("stft"),
xfail("svd"),
xfail("svd_lowrank"),
xfail("t"),
xfail("take_along_dim"),
xfail("take"),
xfail("tensor_split"),
xfail("to_sparse"),
xfail("topk"),
xfail("trace"),
xfail("trapezoid"),
xfail("trapz"),
xfail("triangular_solve"),
xfail("tril"),
xfail("triu"),
xfail("unbind"),
xfail("unfold"),
xfail("unfold_copy"),
xfail("uniform"),
xfail("unflatten"),
xfail("unique_consecutive"),
xfail("unique"),
xfail("unsafe_split"),
xfail("var_mean"),
xfail("var_mean", "unbiased"),
xfail("vdot"),
xfail("view_copy"),
xfail("view_as_complex"),
xfail("where"),
xfail("zeros"),
# ops inside this might even fail without dtensor tests, because we rescale
# the op db common test size factors (i.e. L, M, S), which makes input
# generation go wrong and caused the original function runs to fail;
# we skip them for now but should enable them later.
# TODO: need to clean this list and remove all cases
skip("argwhere"),
skip("cumprod"),
skip("__rmatmul__"),
skip("meshgrid", "list_of_tensors"),
skip("meshgrid", "variadic_tensors"),
skip("nn.functional.scaled_dot_product_attention"),
skip("nn.functional.softmin"),
skip("nn.functional.embedding"),
skip("nn.functional.embedding_bag"),
skip("nn.functional.feature_alpha_dropout", "with_train"),
skip("nn.functional.feature_alpha_dropout", "without_train"),
skip("nn.functional.hinge_embedding_loss"),
skip("nn.functional.cosine_embedding_loss"),
skip("fft.hfft"),
skip("fft.hfft2"),
skip("fft.hfft2"),
skip("fft.hfftn"),
skip("fft.ifftn"),
skip("fft.irfft"),
skip("istft"),
skip("isclose"),
skip("isreal"),
skip("matmul"),
skip("masked.mean"),
skip("masked.var"),
skip("masked.std"),
skip("masked.normalize"),
skip("prod"),
skip("_segment_reduce", "lengths"),
skip("_segment_reduce", "offsets"),
# TODO: fix the following ops
skip("squeeze"),
}
skip_bw = [
None, # corresponds to the transpose ops 'H' and 'T'
"torch.bucketize",
"torch.conj_physical",
"torch.eq",
"torch.isfinite",
"torch.isnan",
]
OP_DB_WORLD_SIZE = 4
DEVICE_TYPE = "cpu"
class TestDTensorOps(DTensorOpTestBase):
|
import unittest
import warnings
import torch
import torch.distributed as dist
import torch.testing._internal.common_methods_invocations as common_ops
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.overrides import resolve_name
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
ops,
)
from torch.testing._internal.common_methods_invocations import DecorateInfo, op_db
from torch.testing._internal.common_utils import (
run_tests,
suppress_warnings,
TEST_WITH_ASAN,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorOpTestBase,
)
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map
common_ops.L = 24
common_ops.M = 12
common_ops.S = 4
common_ops.XS = 2
dtensor_fails = {
# these sometimes pass and sometimes fail;
# we need to remove many of them from the list once ops
# get full support with varying sharding specs
xfail("__getitem__"),
xfail("__rsub__"),
xfail("_chunk_cat"),
xfail("_native_batch_norm_legit"),
xfail("_upsample_bilinear2d_aa"),
xfail("addbmm"),
xfail("addmv"),
xfail("addr"),
xfail("all"),
xfail("allclose"),
xfail("alias_copy"),
xfail("amax"),
xfail("amin"),
xfail("aminmax"),
xfail("any"),
xfail("arange"),
xfail("argmax"),
xfail("argmin"),
xfail("argsort"),
xfail("as_strided"),
xfail("as_strided", "partial_views"),
xfail("as_strided_copy"),
xfail("as_strided_scatter"),
xfail("bernoulli"),
xfail("_batch_norm_with_update"),
xfail("block_diag"),
xfail("broadcast_shapes"),
xfail("cauchy"),
xfail("cdist"),
xfail("cholesky"),
xfail("cholesky_inverse"),
xfail("cholesky_solve"),
xfail("chunk"),
xfail("clamp"),
xfail("clamp_max"),
xfail("clamp_min"),
xfail("combinations"),
xfail("complex"),
xfail("constant_pad_nd"),
xfail("count_nonzero"),
xfail("cross"),
xfail("cummax"),
xfail("cummin"),
xfail("cumsum"),
xfail("cumulative_trapezoid"),
xfail("diagonal_scatter"),
xfail("dist"),
xfail("dot"),
xfail("empty"),
xfail("empty_strided"),
xfail("empty_like"),
xfail("empty_permuted"),
xfail("expand_copy"),
xfail("exponential"),
xfail("equal"),
xfail("eye"),
xfail("fft.fft2"),
xfail("fft.fft"),
xfail("fft.fftn"),
xfail("fft.fftshift"),
xfail("fft.ifft2"),
xfail("fft.ifft"),
xfail("fft.ifftshift"),
xfail("fft.ihfft2"),
xfail("fft.ihfft"),
xfail("fft.ihfftn"),
xfail("fft.irfft2"),
xfail("fft.irfftn"),
xfail("fft.rfft2"),
xfail("fft.rfft"),
xfail("fft.rfftn"),
xfail("fill"),
xfail("flip"),
xfail("fliplr"),
xfail("flipud"),
xfail("floor_divide"),
xfail("fmax"),
xfail("fmin"),
xfail("frexp"),
xfail("full"),
xfail("full_like"),
xfail("gather"),
xfail("geometric"),
xfail("geqrf"),
xfail("grid_sampler_2d"),
xfail("gradient"),
xfail("heaviside"),
xfail("histc"),
xfail("histogram"),
xfail("histogramdd"),
xfail("index_add"),
xfail("index_copy"),
xfail("index_fill"),
xfail("index_put"),
xfail("index_reduce", "prod"),
xfail("index_reduce", "mean"),
xfail("index_reduce", "amax"),
xfail("index_reduce", "amin"),
xfail("index_select"),
xfail("isin"),
xfail("kthvalue"),
xfail("linalg.cholesky"),
xfail("linalg.cholesky_ex"),
xfail("linalg.cross"),
xfail("linalg.det"),
xfail("linalg.det", "singular"),
xfail("linalg.eig"),
xfail("linalg.eigvals"),
xfail("linalg.householder_product"),
xfail("linalg.inv"),
xfail("linalg.inv_ex"),
xfail("linalg.ldl_factor"),
xfail("linalg.ldl_factor_ex"),
xfail("linalg.ldl_solve"),
xfail("linalg.lstsq"),
xfail("linalg.lstsq", "grad_oriented"),
xfail("linalg.lu"),
xfail("linalg.lu_factor"),
xfail("linalg.lu_factor_ex"),
xfail("linalg.lu_solve"),
xfail("linalg.matrix_norm"),
xfail("linalg.matrix_power"),
xfail("linalg.matrix_rank"),
xfail("linalg.matrix_rank", "hermitian"),
xfail("linalg.multi_dot"),
xfail("linalg.norm"),
xfail("linalg.norm", "subgradients_at_zero"),
xfail("linalg.pinv"),
xfail("linalg.pinv", "hermitian"),
xfail("linalg.slogdet"),
xfail("linalg.solve"),
xfail("linalg.solve_ex"),
xfail("linalg.solve_triangular"),
xfail("linalg.tensorinv"),
xfail("linalg.tensorsolve"),
xfail("linalg.vander"),
xfail("linalg.vecdot"),
xfail("linspace"),
xfail("linspace", "tensor_overload"),
xfail("log_normal"),
xfail("logcumsumexp"),
xfail("logdet"),
xfail("logspace"),
xfail("logspace", "tensor_overload"),
xfail("logsumexp"),
xfail("lu"),
xfail("lu_solve"),
xfail("lu_unpack"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("masked.amax"),
xfail("masked.amin"),
xfail("masked.argmax"),
xfail("masked.argmin"),
xfail("masked.cumprod"),
xfail("masked.cumsum"),
xfail("masked.logsumexp"),
xfail("masked.median"),
xfail("matrix_exp"),
xfail("max", "binary"),
xfail("max", "reduction_with_dim"),
xfail("maximum"),
xfail("median"),
xfail("min", "binary"),
xfail("min", "reduction_with_dim"),
xfail("minimum"),
xfail("mode"),
xfail("msort"),
xfail("multinomial"),
xfail("mv"),
xfail("max_pool2d_with_indices_backward", ""),
xfail("nanmean"),
xfail("nanmedian"),
xfail("nanquantile"),
xfail("nansum"),
xfail("native_batch_norm"),
xfail("native_dropout_backward"),
xfail("narrow_copy"),
xfail("ne"),
xfail("new_empty"),
xfail("new_empty_strided"),
xfail("transpose"),
xfail("nn.functional.adaptive_avg_pool1d"),
xfail("nn.functional.adaptive_avg_pool2d"),
xfail("nn.functional.adaptive_avg_pool3d"),
xfail("nn.functional.adaptive_max_pool1d"),
xfail("nn.functional.adaptive_max_pool2d"),
xfail("nn.functional.adaptive_max_pool3d"),
xfail("nn.functional.alpha_dropout"),
xfail("nn.functional.avg_pool1d"),
xfail("nn.functional.avg_pool2d"),
xfail("nn.functional.avg_pool3d"),
xfail("nn.functional.batch_norm"),
xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.binary_cross_entropy"),
xfail("nn.functional.binary_cross_entropy_with_logits"),
xfail("nn.functional.celu"),
xfail("nn.functional.conv1d"),
xfail("nn.functional.conv2d"),
xfail("nn.functional.conv3d"),
xfail("nn.functional.conv_transpose1d"),
xfail("nn.functional.conv_transpose2d"),
xfail("nn.functional.conv_transpose3d"),
xfail("nn.functional.cosine_similarity"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.dropout"),
xfail("nn.functional.dropout2d"),
xfail("nn.functional.dropout3d"),
xfail("nn.functional.elu"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.glu"),
xfail("nn.functional.grid_sample"),
xfail("nn.functional.group_norm"),
xfail("nn.functional.hardshrink"),
xfail("nn.functional.hardsigmoid"),
xfail("nn.functional.hardswish"),
xfail("nn.functional.hardtanh"),
xfail("nn.functional.huber_loss"),
xfail("nn.functional.instance_norm"),
xfail("nn.functional.interpolate", "area"),
xfail("nn.functional.interpolate", "bicubic"),
xfail("nn.functional.interpolate", "bilinear"),
xfail("nn.functional.interpolate", "linear"),
xfail("nn.functional.interpolate", "nearest"),
xfail("nn.functional.interpolate", "nearest-exact"),
xfail("nn.functional.interpolate", "trilinear"),
xfail("nn.functional.leaky_relu"),
xfail("nn.functional.linear"),
xfail("nn.functional.local_response_norm"),
xfail("nn.functional.logsigmoid"),
xfail("nn.functional.margin_ranking_loss"),
xfail("nn.functional.max_pool1d"),
xfail("nn.functional.max_pool2d"),
xfail("nn.functional.max_pool3d"),
xfail("nn.functional.max_unpool1d"),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.max_unpool2d"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.max_unpool3d"),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.mish"),
xfail("nn.functional.mse_loss"),
xfail("nn.functional.multi_margin_loss"),
xfail("nn.functional.multi_head_attention_forward"),
xfail("nn.functional.multilabel_margin_loss"),
xfail("nn.functional.multilabel_soft_margin_loss"),
xfail("nn.functional.normalize"),
xfail("nn.functional.pad", "constant"),
xfail("nn.functional.pad", "reflect"),
xfail("nn.functional.pad", "replicate"),
xfail("nn.functional.pad", "replicate_negative"),
xfail("nn.functional.pairwise_distance"),
xfail("nn.functional.pdist"),
xfail("nn.functional.pixel_shuffle"),
xfail("nn.functional.pixel_unshuffle"),
xfail("nn.functional.prelu"),
xfail("nn.functional.relu6"),
xfail("nn.functional.rrelu"),
xfail("nn.functional.selu"),
xfail("nn.functional.smooth_l1_loss"),
xfail("nn.functional.soft_margin_loss"),
xfail("nn.functional.softplus"),
xfail("nn.functional.softshrink"),
xfail("nn.functional.threshold"),
xfail("nn.functional.triplet_margin_loss"),
xfail("nn.functional.triplet_margin_with_distance_loss"),
xfail("nn.functional.unfold"),
xfail("nn.functional.upsample_bilinear"),
xfail("nn.functional.upsample_nearest"),
xfail("nonzero"),
xfail("normal"),
xfail("normal", "number_mean"),
xfail("normal", "in_place"),
xfail("ormqr"),
xfail("ones"),
xfail("pca_lowrank"),
xfail("pinverse"),
xfail("polar"),
xfail("put"),
xfail("quantile"),
xfail("rand_like"),
xfail("randint_like"),
xfail("randint"),
xfail("randn"),
xfail("randn_like"),
xfail("renorm"),
xfail("repeat_interleave"),
xfail("resize_"),
xfail("resize_as_"),
xfail("roll"),
xfail("rot90"),
xfail("rsub"),
xfail("scalar_tensor"),
xfail("scatter_add"),
xfail("scatter_reduce", "amax"),
xfail("scatter_reduce", "amin"),
xfail("scatter_reduce", "mean"),
xfail("scatter_reduce", "prod"),
xfail("scatter_reduce", "sum"),
xfail("searchsorted"),
xfail("select"),
xfail("select_scatter"),
xfail("sort"),
xfail("sparse.sampled_addmm"),
xfail("sparse.mm", "reduce"),
xfail("special.airy_ai"),
xfail("special.bessel_j0"),
xfail("special.bessel_j1"),
xfail("special.bessel_y0"),
xfail("special.bessel_y1"),
xfail("special.chebyshev_polynomial_t"),
xfail("special.chebyshev_polynomial_u"),
xfail("special.entr"),
xfail("special.erfcx"),
xfail("special.hermite_polynomial_h"),
xfail("special.hermite_polynomial_he"),
xfail("special.i0e"),
xfail("special.i1"),
xfail("special.i1e"),
xfail("special.laguerre_polynomial_l"),
xfail("special.log_ndtr"),
xfail("special.modified_bessel_i0"),
xfail("special.modified_bessel_i1"),
xfail("special.modified_bessel_k0"),
xfail("special.modified_bessel_k1"),
xfail("special.ndtri"),
xfail("special.scaled_modified_bessel_k0"),
xfail("special.scaled_modified_bessel_k1"),
xfail("special.spherical_bessel_j0"),
xfail("special.xlog1py"),
xfail("special.zeta"),
xfail("squeeze", "multiple"),
xfail("signal.windows.bartlett"),
xfail("signal.windows.blackman"),
xfail("signal.windows.cosine"),
xfail("signal.windows.exponential"),
xfail("signal.windows.gaussian"),
xfail("signal.windows.general_cosine"),
xfail("signal.windows.general_hamming"),
xfail("signal.windows.hamming"),
xfail("signal.windows.hann"),
xfail("signal.windows.nuttall"),
xfail("signal.windows.kaiser"),
xfail("stack"),
xfail("std"),
xfail("std", "unbiased"),
xfail("std_mean"),
xfail("std_mean", "unbiased"),
xfail("stft"),
xfail("svd_lowrank"),
xfail("t_copy"),
xfail("take"),
xfail("tensor_split"),
xfail("to_sparse"),
xfail("trace"),
xfail("trapezoid"),
xfail("trapz"),
xfail("triangular_solve"),
xfail("unbind"),
xfail("unfold"),
xfail("unfold_copy"),
xfail("uniform"),
xfail("unflatten"),
xfail("unique_consecutive"),
xfail("unique"),
xfail("unsafe_split"),
xfail("unsafe_chunk"),
xfail("_unsafe_masked_index"),
xfail("_unsafe_masked_index_put_accumulate"),
xfail("var_mean"),
xfail("var_mean", "unbiased"),
xfail("vdot"),
xfail("view_copy"),
xfail("zeros"),
# ops inside this might even fail without dtensor tests, because we rescale
# the op db common test size factors (i.e. L, M, S), which makes input
# generation go wrong and caused the original function runs to fail;
# we skip them for now but should enable them later.
# TODO: need to clean this list and remove all cases
skip("argwhere"),
skip("cumprod"),
skip("__rmatmul__"),
skip("meshgrid", "list_of_tensors"),
skip("meshgrid", "variadic_tensors"),
skip("nn.functional.scaled_dot_product_attention"),
skip("nn.functional.softmin"),
skip("nn.functional.embedding"),
skip("nn.functional.embedding_bag"),
skip("nn.functional.feature_alpha_dropout", "with_train"),
skip("nn.functional.feature_alpha_dropout", "without_train"),
skip("nn.functional.hinge_embedding_loss"),
skip("nn.functional.cosine_embedding_loss"),
skip("fft.hfft"),
skip("fft.hfft2"),
skip("fft.hfft2"),
skip("fft.hfftn"),
skip("fft.ifftn"),
skip("fft.irfft"),
skip("istft"),
skip("isclose"),
skip("isreal"),
skip("matmul"),
skip("masked.mean"),
skip("masked.var"),
skip("masked.std"),
skip("masked.normalize"),
skip("prod"),
skip("_segment_reduce", "lengths"),
skip("_segment_reduce", "offsets"),
# TODO: fix the following ops
skip("squeeze"),
}
skip_bw = [
None, # corresponds to the transpose ops 'H' and 'T'
"torch.bucketize",
"torch.conj_physical",
"torch.eq",
"torch.isfinite",
"torch.isnan",
]
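The xfail(...) / skip(...) entries in dtensor_fails above are small helpers that tag an op (by name and variant) so the test generator can wrap the matching OpInfo in a DecorateInfo. Their definitions are not shown in this excerpt; the sketch below is only one plausible, assumed shape for them, not the file's actual code.
def xfail(op_name, variant_name="", *, device_type=None, dtypes=None):
    # assumed layout: (name, variant, device_type, dtypes, expected_failure)
    return (op_name, variant_name, device_type, dtypes, True)
def skip(op_name, variant_name="", *, device_type=None, dtypes=None):
    return (op_name, variant_name, device_type, dtypes, False)
print(xfail("var_mean", "unbiased"))  # ('var_mean', 'unbiased', None, None, True)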
OP_DB_WORLD_SIZE = 4
DEVICE_TYPE = "cpu"
class TestDTensorOps(DTensorOpTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_tensor/test_embedding_ops.py
|
shard_embedding_fn
|
def shard_embedding_fn(name, module, device_mesh):
for name, param in module.named_parameters():
dist_param = torch.nn.Parameter(
distribute_tensor(param, device_mesh, [Shard(shard_dim)])
)
module.register_parameter(name, dist_param)
sharded_embedding = distribute_module(
embedding_mod, device_mesh, shard_embedding_fn
)
return sharded_embedding
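shard_embedding_fn above relies on the standard pattern of iterating named_parameters and re-registering each one as a new Parameter. A single-process sketch of that pattern with no device mesh follows (assumption: the wrapping is just a detached clone, standing in for distribute_tensor).
import torch
emb = torch.nn.Embedding(16, 8)
for name, param in list(emb.named_parameters()):
    wrapped = torch.nn.Parameter(param.detach().clone())  # stand-in for distribute_tensor
    emb.register_parameter(name, wrapped)
print(emb.weight.shape)  # torch.Size([16, 8])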
|
import sys
import torch
from torch.distributed._tensor import (
distribute_module,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_init.py
|
test_zeros_submesh
|
if __name__ == "__main__":
run_tests()
|
def test_zeros_submesh(self):
# default world_size is 4
# construct a cuda device 1d mesh, with no sub pg initialized
sub_mesh_list = [0, 3]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in sub_mesh_list:
self.assertEqual(local_tensor.size(), torch.Size([16, 3]))
self.assertEqual(local_tensor, torch.zeros([16, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.zeros(0))
# construct an uneven cuda device 1d mesh, with sub pg initialized
sub_mesh_list = [0, 1, 3]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in sub_mesh_list:
if self.rank != 3:
self.assertEqual(local_tensor.size(), torch.Size([11, 3]))
self.assertEqual(local_tensor, torch.zeros([11, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([10, 3]))
self.assertEqual(local_tensor, torch.zeros([10, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.tensor([]))
# construct a cuda device 2d mesh, with no subpg initialized
sub_mesh_list = [[0], [3]]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0), Shard(1)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in [0, 3]:
self.assertEqual(local_tensor.size(), torch.Size([16, 3]))
self.assertEqual(local_tensor, torch.zeros([16, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.tensor([]))
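The uneven-submesh branch above relies on how Shard(0) chunks 32 rows across 3 ranks. A plain-torch check of that arithmetic (no mesh needed): torch.chunk uses ceil(32 / 3) = 11 rows per chunk, leaving 11, 11 and 10 rows.
import torch
chunks = torch.zeros(32, 3).chunk(3, dim=0)
print([c.shape[0] for c in chunks])  # [11, 11, 10]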
|
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard, zeros
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class DTensorConstructorTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_tensor/test_math_ops.py
|
_check_module
|
def _check_module(self, m1, m2, check_grad=False):
named_parameters = dict(m1.named_parameters())
for name, param_m2 in m2.named_parameters():
self.assertTrue(name in named_parameters)
param_m1 = named_parameters[name]
if check_grad:
param_m2 = param_m2.grad
param_m1 = param_m1.grad
if isinstance(param_m2, DTensor):
replicate = [Replicate()]
param_m2 = param_m2.redistribute(
device_mesh=param_m2.device_mesh, placements=replicate
).to_local()
self.assertEqual(param_m2, param_m1)
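_check_module above is a plain parameter-by-parameter comparison, with an extra redistribute step only when a parameter is a DTensor. The same check on two ordinary module copies, as a runnable single-process sketch:
import copy
import torch
m1 = torch.nn.Linear(4, 4)
m2 = copy.deepcopy(m1)
named = dict(m1.named_parameters())
for name, p2 in m2.named_parameters():
    assert name in named and torch.equal(named[name], p2)
print("parameters match")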
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
class DistMathOpsTest(DTensorTestBase):
from torch.distributed._tensor.placement_types import TensorMeta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_math_ops.py
|
_replicate_fn
|
def _replicate_fn(name, module, device_mesh):
for name, param in module.named_parameters():
if name in ["weight", "bias"]:
param_dist = torch.nn.Parameter(
distribute_tensor(param, device_mesh, [Replicate()])
)
module.register_parameter(name, param_dist)
layer_norm_dist = distribute_module(layer_norm, device_mesh, _replicate_fn)
x_local = x
x_dist = distribute_tensor(x, device_mesh, [Shard(shard_dim)])
y_local = layer_norm_local(x_local)
# make sure that forward layer norm does not introduce extra collectives
comm_mode = CommDebugMode()
with comm_mode:
y_dist = layer_norm_dist(x_dist)
self.assertLessEqual(
comm_mode.get_total_counts(),
1, # TODO: This should be 0!
f"comm count={comm_mode.get_total_counts()}, "
f"shard_dim={shard_dim}, norm_shape={normalized_shape}, elem_affine={elementwise_affine}",
)
from torch.distributed._tensor.placement_types import TensorMeta
dtensor_meta = y_dist._spec.tensor_meta
assert isinstance(dtensor_meta, TensorMeta)
# make sure the right shape in sharding prop
self.assertEqual(y_local.shape, dtensor_meta.shape)
self.assertEqual(y_local, y_dist.full_tensor())
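_replicate_fn above only re-registers parameters named "weight" and "bias"; with elementwise_affine enabled those are exactly the parameters a LayerNorm module exposes, as this plain-torch check shows:
import torch
ln = torch.nn.LayerNorm(8, elementwise_affine=True)
print([name for name, _ in ln.named_parameters()])  # ['weight', 'bias']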
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed._tensor.placement_types import TensorMeta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_math_ops.py
|
_replicate_fn
|
def _replicate_fn(name, module, device_mesh):
for name, param in module.named_parameters():
if name in ["weight", "bias"]:
param_dist = torch.nn.Parameter(
distribute_tensor(param, device_mesh, [Replicate()])
)
module.register_parameter(name, param_dist)
layer_norm_dist = distribute_module(layer_norm, device_mesh, _replicate_fn)
x_local = x
x_dist = distribute_tensor(x, device_mesh, [Shard(shard_dim)])
y_local = layer_norm_local(x_local)
# make sure that forward layer norm does not introduce extra collectives
comm_mode = CommDebugMode()
with comm_mode:
y_dist = layer_norm_dist(x_dist)
self.assertLessEqual(
comm_mode.get_total_counts(),
1, # TODO: This should be 0!
f"comm count={comm_mode.get_total_counts()}, "
f"shard_dim={shard_dim}, norm_shape={normalized_shape}, elem_affine={elementwise_affine}",
)
from torch.distributed._tensor.placement_types import TensorMeta
dtensor_meta = y_dist._spec.tensor_meta
assert isinstance(dtensor_meta, TensorMeta)
# make sure the right shape in sharding prop
self.assertEqual(y_local.shape, dtensor_meta.shape)
self.assertEqual(y_local, y_dist.full_tensor())
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed._tensor.placement_types import TensorMeta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_math_ops.py
|
__init__
|
def __init__(self):
super().__init__()
self.preln_embeddings = torch.nn.Embedding(
vocab_size, embedding_dim
)
self.layer_norm = torch.nn.LayerNorm(
normalized_shape, elementwise_affine=elementwise_affine
)
self.postln_linear = torch.nn.Linear(
embedding_dim, embedding_dim
)
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed._tensor.placement_types import TensorMeta
class LnTpBlock(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_math_ops.py
|
forward
|
def forward(self, tokens):
h = self.preln_embeddings(tokens)
h = self.layer_norm(h)
output = self.postln_linear(h)
return output
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed._tensor.placement_types import TensorMeta
class LnTpBlock(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_optimizers.py
|
test_asgd_1d_sharding
|
def test_asgd_1d_sharding(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
asgd_configs = [
{"lr": 0.1, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "foreach": False},
{"lr": 0.1, "lambd": 0.001, "alpha": 0.85, "t0": 1e5, "foreach": False},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": False,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
},
{
"lr": 0.1,
"lambd": 0.001,
"alpha": 0.85,
"t0": 1e5,
"weight_decay": 0.05,
"foreach": True,
"maximize": True,
},
]
for config in asgd_configs:
mod = MLPModule(self.device_type)
opt = torch.optim.ASGD(mod.parameters(), **config)
dist_mod = distribute_module(
deepcopy(mod), mesh, shard_fn, input_fn, output_fn
)
dist_opt = torch.optim.ASGD(dist_mod.parameters(), **config)
# use ones to make sure the single machine model have the same input
# on different ranks
inp = torch.ones(8, 10, device=self.device_type)
# TODO: We want to keep a unit test for ASGD optimizer for the time being, but we need to look into why
# when using ASGD we need higher atol and rtol when comparing model parameters.
# Default 'rtol' and 'atol' for attr:`~torch.float32` are ``1.3e-6`` and ``1e-5``
# Pointer here: https://github.com/pytorch/pytorch/blob/main/torch/testing/_comparison.py#L65
self._assert_optimizer(
mesh, mod, opt, dist_mod, dist_opt, inp, atol=1.3e-5, rtol=1e-4
)
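For reference, one of the ASGD configs exercised above, run on a plain single-device module; this is only a sketch of what the distributed comparison mirrors, not part of the test:
import torch
model = torch.nn.Linear(10, 10)
opt = torch.optim.ASGD(
    model.parameters(), lr=0.1, lambd=0.001, alpha=0.85, t0=1e5,
    weight_decay=0.05, foreach=True,
)
model(torch.ones(8, 10)).sum().backward()
opt.step()
opt.zero_grad()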
|
from copy import deepcopy
import torch
import torch.nn as nn
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
class TestDTensorOptimizer(DTensorTestBase):
from torch.optim.optimizer import _foreach_supported_types
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_pointwise_ops.py
|
test_partial_add
|
def test_partial_add(self):
device_mesh = self.build_device_mesh()
d_1 = DTensor.from_local(torch.rand(2, 2), device_mesh, [Partial()])
d_2 = DTensor.from_local(torch.rand(2, 2), device_mesh, [Partial()])
d_3 = d_1 + d_2
self.assertTrue(d_3._spec.placements[0].is_partial())
|
from typing import Any, Callable, Dict, Optional, Sequence
from unittest import skip
import torch
import torch.utils._pytree as pytree
from torch import Tensor
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import (
Partial,
Placement,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorOpTestBase,
skip_unless_torch_gpu,
)
class DistElementwiseOpsTest(DTensorOpTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_pointwise_ops.py
|
test_partial_mul
|
def test_partial_mul(self):
device_mesh = self.build_device_mesh()
d_1 = DTensor.from_local(torch.ones(2, 2), device_mesh, [Partial()])
d_2 = DTensor.from_local(torch.ones(2, 2), device_mesh, [Partial()])
d_3 = d_1 * d_2
self.assertTrue(d_3._spec.placements[0].is_replicate())
self.assertEqual(d_3.to_local(), torch.ones(2, 2) * (self.world_size**2))
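Worked arithmetic behind the two partial-placement tests above, emulated on a single process (assumption: world_size == 4 and every rank holds ones(2, 2) locally):
import torch
world_size = 4
local = torch.ones(2, 2)
# Partial + Partial stays Partial: the per-rank sums still reduce to the right total,
# so addition needs no communication and d_3 keeps the partial placement.
add_full = world_size * (local + local)                  # full(d_1) + full(d_2) == 8
# Partial * Partial is not linear, so the operands are reduced to full values first;
# each full tensor equals world_size * ones, hence the world_size ** 2 product.
mul_full = (world_size * local) * (world_size * local)   # == 16 == world_size ** 2
print(add_full[0, 0].item(), mul_full[0, 0].item())      # 8.0 16.0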
|
from typing import Any, Callable, Dict, Optional, Sequence
from unittest import skip
import torch
import torch.utils._pytree as pytree
from torch import Tensor
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import (
Partial,
Placement,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorOpTestBase,
skip_unless_torch_gpu,
)
class DistElementwiseOpsTest(DTensorOpTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_math_ops.py
|
test_linalg_eigh
|
if __name__ == "__main__":
run_tests()
|
def test_linalg_eigh(self):
A = torch.randn(2, 2, dtype=torch.float64)
mesh = self.build_device_mesh()
dtensor_A = distribute_tensor(A, device_mesh=mesh, placements=[Replicate()])
dtensor_A = dtensor_A + dtensor_A.mT
dtensor_L, dtensor_Q = torch.linalg.eigh(dtensor_A)
# TODO: we need to convert A, L, Q to local because we don't have a
# sharding strategy registered for aten.dist.default yet.
local_A, local_L, local_Q = (
dtensor_A.to_local(),
dtensor_L.to_local(),
dtensor_Q.to_local(),
)
distance = torch.dist(local_Q @ torch.diag(local_L) @ local_Q.mT, local_A)
self.assertEqual(distance.item(), 0.0)
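The same eigh reconstruction check as above on a plain (non-distributed) tensor, to show what the DTensor test verifies once the replicated operands are converted back to local tensors:
import torch
A = torch.randn(2, 2, dtype=torch.float64)
A = A + A.mT                     # make it symmetric so eigh applies
L, Q = torch.linalg.eigh(A)
print(torch.dist(Q @ torch.diag(L) @ Q.mT, A).item())  # ~0.0 up to float64 rounding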
|
import copy
import itertools
from pprint import pformat
from typing import NamedTuple
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
DTensor,
)
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.tensor._ops.utils import is_tensor_partial, normalize_dim
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
SequenceParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
with_comms,
)
funcol = torch.ops.c10d_functional
class DistMathOpsTest(DTensorTestBase):
from torch.distributed._tensor.placement_types import TensorMeta
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_tensor/test_tensor_ops.py
|
test_slice
|
if __name__ == "__main__":
run_tests()
|
def test_slice(self):
mesh = DeviceMesh(self.device_type, list(range(self.world_size))) # 1D mesh
comm_mode = CommDebugMode()
shard_spec = [Shard(1)]
global_tensor = torch.randn(8, 16, requires_grad=True)
sharded_dtensor = distribute_tensor(global_tensor, mesh, shard_spec)
global_out = global_tensor[:, 8:]
with comm_mode:
sharded_out = sharded_dtensor[:, 8:]
self.assertEqual(comm_mode.get_total_counts(), 1)
global_out.backward(gradient=torch.ones_like(global_out))
with comm_mode:
sharded_out_grad = torch.distributed._tensor.ones(
sharded_out.shape, device_mesh=mesh, placements=[Shard(1)]
)
sharded_out.backward(gradient=sharded_out_grad)
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(sharded_out.full_tensor(), global_out)
self.assertEqual(sharded_dtensor.grad.full_tensor(), global_tensor.grad)
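Plain-tensor version of the slice above, plus the column arithmetic behind the Shard(1) case: with 16 columns over 4 ranks each rank owns 4 columns, so [:, 8:] only touches the shards held by ranks 2 and 3, and the gradient of the untouched columns stays zero.
import torch
global_tensor = torch.randn(8, 16, requires_grad=True)
out = global_tensor[:, 8:]
out.backward(gradient=torch.ones_like(out))
print(out.shape, global_tensor.grad[:, :8].abs().sum().item())  # torch.Size([8, 8]) 0.0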
|
import torch
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import Partial, Replicate, Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorTestBase,
with_comms,
)
class DistTensorOpsTest(DTensorTestBase):
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
from torch.distributed.tensor.debug import _get_sharding_prop_cache_info
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_tensor/test_tp_sharding_ops.py
|
test_split_partial_tensor
|
def test_split_partial_tensor(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
tensor = torch.rand(3, 5, 6, device=self.device_type)
dist_tensor = DTensor.from_local(tensor, device_mesh, [_Partial()])
with self.assertRaisesRegex(
RuntimeError,
"_Partial placement is not implemented",
):
dist_tensor = dist_tensor.split(3)
|
import torch
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed._tensor.placement_types import _Partial
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class TPShardingOpsTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_tensor/test_redistribute.py
|
test_redistribute_shard_dim_multi_dim_mesh
|
if __name__ == "__main__":
run_tests()
|
def test_redistribute_shard_dim_multi_dim_mesh(self):
mesh = init_device_mesh(self.device_type, (2, 2, 2))
input_data = torch.randn((8, 8, 8), device=self.device_type)
sharding_src_dst_pairs_3d = [
([Shard(0), Shard(0), Shard(0)], [Shard(1), Shard(1), Shard(1)]),
([Shard(0), Shard(1), Shard(0)], [Shard(1), Shard(0), Shard(0)]),
([Shard(0), Shard(1), Shard(2)], [Shard(2), Shard(1), Shard(0)]),
([Shard(1), Shard(0), Shard(0)], [Replicate(), Shard(0), Shard(0)]),
([Shard(1), Replicate(), Shard(0)], [Replicate(), Shard(0), Shard(0)]),
([Shard(0), Shard(0), Shard(1)], [Shard(0), Shard(1), Shard(2)]),
]
comm_counts_3d = [
3, # 2: S0 -> R, 1: S1 -> R, 0: S0 -> S1
3, # 2: S0 -> R, 1: S1 -> R, 0: S0 -> S1, 1: R -> S0, 2: R -> S0
2, # 2: S2 -> R, 0: S1 -> S2
1, # 0: S1 -> R
2, # 2: S0 -> R, 1: R -> S0, 2: R -> S0, 0: S1 -> R
2, # 2: S1 -> S2, 1: S0 -> S1
]
comm_mode = CommDebugMode()
for idx, (src_placement, dst_placement) in enumerate(sharding_src_dst_pairs_3d):
expected_dt = distribute_tensor(input_data.clone(), mesh, dst_placement)
sharded_dt = distribute_tensor(input_data, mesh, src_placement)
with comm_mode:
out_dt = sharded_dt.redistribute(mesh, dst_placement)
self.assertEqual(out_dt.placements, expected_dt.placements)
self.assertEqual(comm_mode.get_total_counts(), comm_counts_3d[idx])
local_out_dt = out_dt.to_local()
local_expected_dt = expected_dt.to_local()
self.assertEqual(local_out_dt, local_expected_dt)
|
import itertools
import torch
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import Partial, Replicate, Shard
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor._collective_utils import shard_dim_alltoall
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
funcol = torch.ops.c10d_functional
from torch.distributed.tensor._redistribute import Redistribute
class MultiDimRedistributeTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_tensor/test_utils.py
|
test_strided_sharding_assumption_in_meta_compute
|
def test_strided_sharding_assumption_in_meta_compute(self):
# current ``compute_local_shape_and_global_offset`` does not allow Shard(i)
# placement to appear after the strided sharding part has ended. This test
# checks that ``compute_local_shape_and_global_offset`` rejects placements
# that violate the assumption and does not forbid the allowed ones.
# Test 0: 2-D mesh
mesh_size_0 = 2
mesh_size_1 = self.world_size // mesh_size_0
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1),
mesh_dim_names=("mesh-0", "mesh-1"),
)
global_tensor_shape = torch.Size([2 * self.world_size, 2 * self.world_size])
for shard_dim in [0, 1]:
placements = [
_StridedShard(shard_dim, split_factor=mesh_size_1),
Shard(shard_dim),
]
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# Test 1: 3-D mesh
mesh_size_0 = 2
mesh_size_1 = 2
mesh_size_2 = self.world_size // (mesh_size_0 * mesh_size_1)
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1, mesh_size_2),
mesh_dim_names=("mesh-0", "mesh-1", "mesh-2"),
)
# legal placements: Shard() appears after the strided part but it's on another
# tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
Shard(0),
Shard(1),
]
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# illegal placements: Shard() appears after the strided part and it's on the
# same tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
Shard(0),
Shard(0),
]
with self.assertRaisesRegex(NotImplementedError, "the strided part has ended"):
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# Test 2: 4-D mesh
mesh_size_0 = 1
mesh_size_1 = 2
mesh_size_2 = 2
mesh_size_3 = self.world_size // (mesh_size_0 * mesh_size_1 * mesh_size_2)
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1, mesh_size_2, mesh_size_3),
mesh_dim_names=("mesh-0", "mesh-1", "mesh-2", "mesh-3"),
)
# legal placements: Shard() appears after the strided part but it's on another
# tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
_StridedShard(1, split_factor=mesh_size_3),
Shard(0),
Shard(1),
]
local_shape, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
expected_local_shape = (
2 * mesh_size_1 * mesh_size_3,
2 * mesh_size_0 * mesh_size_2,
)
self.assertEqual(local_shape, expected_local_shape)
# illegal placements: Shard() appears after the strided part and it's on the
# same tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
_StridedShard(1, split_factor=mesh_size_3),
Shard(0),
Shard(0),
]
with self.assertRaisesRegex(NotImplementedError, "the strided part has ended"):
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
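Arithmetic behind expected_local_shape in Test 2 above: tensor dim 0 is sharded over mesh dims 0 and 2 and tensor dim 1 over mesh dims 1 and 3, so each local dim is the global size divided by the product of its mesh-dim sizes (assumption: world_size = m0 * m1 * m2 * m3, i.e. 8 with the sizes below).
m0, m1, m2, m3 = 1, 2, 2, 2            # mesh sizes, world_size == 8
global_dim = 2 * (m0 * m1 * m2 * m3)   # 2 * world_size == 16 on both tensor dims
local_shape = (global_dim // (m0 * m2), global_dim // (m1 * m3))
print(local_shape)                     # (8, 4) == (2 * m1 * m3, 2 * m0 * m2)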
|
import itertools
import torch
from torch.distributed._tensor import distribute_tensor, DTensor
from torch.distributed._tensor._utils import (
compute_local_shape,
compute_local_shape_and_global_offset,
)
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.placement_types import _StridedShard, Replicate, Shard
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class UtilTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_utils.py
|
test_2d_mesh_2d_tensor_strided_sharding
|
def test_2d_mesh_2d_tensor_strided_sharding(self):
# Test 2: 1-d tensor over 2-d mesh
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dim0", "dim1")
)
mesh_dim0_size = mesh_2d["dim0"].size()
mesh_dim1_size = mesh_2d["dim1"].size()
mesh_dim0_local_rank = mesh_2d["dim0"].get_local_rank(mesh_dim=0)
mesh_dim1_local_rank = mesh_2d["dim1"].get_local_rank(mesh_dim=0)
x = torch.arange(2 * self.world_size, device=self.device_type).reshape(2, -1)
"""
strided sharding:
rank 0: [[0], [4]]
rank 1: [[2], [6]]
rank 2: [[1], [5]]
rank 3: [[3], [7]]
"""
split_factor = 2
# shard on mesh dim-0
shard_placement_dim0 = _StridedShard(1, split_factor=split_factor)
tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
shard_x = tensor_list[mesh_dim0_local_rank]
expected_shard_dim0 = (
torch.tensor([[0, 2], [4, 6]], device=self.device_type)
if mesh_dim0_local_rank == 0
else torch.tensor([[1, 3], [5, 7]], device=self.device_type)
)
self.assertEqual(shard_x, expected_shard_dim0)
# shard on mesh dim-1
shard_placement_dim1 = _StridedShard(1, split_factor=1) # same as Shard(1)
tensor_list, _ = shard_placement_dim1._split_tensor(shard_x, mesh_dim1_size)
shard_x = tensor_list[mesh_dim1_local_rank]
expected_shard_dim1 = [
torch.tensor(value, device=self.device_type)
for value in [[[0], [4]], [[2], [6]], [[1], [5]], [[3], [7]]]
][self.rank]
self.assertEqual(shard_x, expected_shard_dim1)
# shard_to_replicate on mesh dim-1
full_tensor = shard_placement_dim1._to_replicate_tensor(
shard_x,
mesh_2d,
mesh_dim=1,
current_logical_shape=list(expected_shard_dim0.shape),
)
self.assertEqual(full_tensor, expected_shard_dim0)
# shard_to_replicate on mesh dim-0
full_tensor = shard_placement_dim0._to_replicate_tensor(
full_tensor,
mesh_2d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
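# A minimal sketch of the interleaving documented in the docstring above (an
# illustration, not the actual _split_tensor implementation): along one dim,
# _StridedShard(split_factor=sf) over n shards behaves like viewing that dim as
# (sf, n, block) and indexing the middle axis.
import torch

def strided_shard_1d(t, num_shards, split_factor):
    block = t.numel() // (num_shards * split_factor)
    return [
        t.view(split_factor, num_shards, block)[:, i, :].reshape(-1)
        for i in range(num_shards)
    ]

# strided_shard_1d(torch.arange(4), 2, 2) -> [tensor([0, 2]), tensor([1, 3])],
# matching the dim-0 split above where ranks 0/1 see columns [0, 2] and
# ranks 2/3 see columns [1, 3].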
|
import itertools
import torch
from torch.distributed._tensor import distribute_tensor, DTensor
from torch.distributed._tensor._utils import (
compute_local_shape,
compute_local_shape_and_global_offset,
)
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.placement_types import _StridedShard, Replicate, Shard
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class TestStridedSharding(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_utils.py
|
test_fsdp2_tp_2d_dtensor_local_shards_and_offsets
|
def test_fsdp2_tp_2d_dtensor_local_shards_and_offsets(self):
# We are mimicking the behavior of FSDP2 + TP.
    # Currently, the 2D DTensor's local shard is incorrect for resharding: we build it
    # directly to avoid extra communication, so `compute_local_shape_and_global_offset`
    # doesn't know the correct offsets for resharding.
# When we have a global_tensor of [0, 1, 2, 3, 4, 5, 6, 7], the local shard of 2D DTensor would be:
# local tensor -- rank0: [0, 1], rank1: [4, 5], rank2: [2, 3], rank3: [6, 7]
# current offsets -- rank0: [0, 0], rank1: [1, 0], rank2: [2, 0], rank3: [3, 0]
# Ideally, with strided sharding, the offsets should be rank0: [0, 0], rank1: [2, 0], rank2: [1, 0], rank3: [3, 0]
    # TODO: to make the local shard of FSDP2 + TP correct for resharding, it would require
    # strided sharding, and compute_local_shape_and_global_offset would have to take
    # strided sharding into consideration.
global_tensor = torch.arange(8).view(4, 2)
with CommDebugMode() as comm_mode:
mesh_2d = init_device_mesh(
self.device_type, (2, 2), mesh_dim_names=("DP", "TP")
)
tp_mesh = mesh_2d["TP"]
dtensor_tp = distribute_tensor(
global_tensor, tp_mesh, placements=[Shard(0)]
)
chunks = list(torch.chunk(dtensor_tp.to_local(), 2, dim=0))
shard_rank = 0 if self.rank // 2 == 0 else 1
sharded_param = chunks[shard_rank]
spec_2d = DTensorSpec(
mesh=mesh_2d,
placements=(_StridedShard(0, split_factor=2), Shard(0)),
tensor_meta=TensorMeta(
global_tensor.size(),
global_tensor.stride(),
global_tensor.dtype,
),
)
dtensor_2d = DTensor(
sharded_param,
spec_2d,
requires_grad=False,
)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 0
)
self.assertEqual(global_tensor, dtensor_2d.full_tensor())
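# A standalone sketch reproducing the per-rank local shards described in the
# comment above (assuming the 2x2 mesh maps rank = dp * 2 + tp): TP applies
# Shard(0) to the global tensor, then FSDP chunks that TP shard again on dim 0.
import torch

def fsdp_tp_local_shard(rank: int) -> torch.Tensor:
    dp, tp = rank // 2, rank % 2
    tp_shard = torch.chunk(torch.arange(8).view(4, 2), 2, dim=0)[tp]
    return torch.chunk(tp_shard, 2, dim=0)[dp]

# fsdp_tp_local_shard(1) -> tensor([[4, 5]]), fsdp_tp_local_shard(2) -> tensor([[2, 3]])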
|
import itertools
import torch
from torch.distributed._tensor import distribute_tensor, DTensor
from torch.distributed._tensor._utils import (
compute_local_shape,
compute_local_shape_and_global_offset,
)
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.placement_types import _StridedShard, Replicate, Shard
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class Test2DStridedLocalShard(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_tensor_ops.py
|
test_equal
|
def test_equal(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
shard_spec = [Shard(0)]
input_tensor_1 = torch.ones(4, 4)
dist_tensor_1 = DTensor.from_local(input_tensor_1, device_mesh, shard_spec)
# tensors are equal
input_tensor_2 = torch.ones(4, 4)
dist_tensor_2 = DTensor.from_local(input_tensor_2, device_mesh, shard_spec)
eq_result = dist_tensor_1.equal(dist_tensor_2)
self.assertTrue(eq_result)
# tensors are different on some shards
if self.rank == 0:
input_tensor_2 = torch.ones(4, 4)
else:
input_tensor_2 = torch.randn(4, 4)
dist_tensor_2 = DTensor.from_local(input_tensor_2, device_mesh, shard_spec)
eq_result = dist_tensor_1.equal(dist_tensor_2)
    # the equal op all-reduces each shard's local result
self.assertFalse(eq_result)
self.assertTrue(dist_tensor_1.is_same_size(dist_tensor_2))
# test if sharding are different
replica_spec = [Replicate()]
global_input = torch.ones(4 * self.world_size, 4)
dist_tensor_3 = DTensor.from_local(
global_input, device_mesh, replica_spec, run_check=False
)
self.assertTrue(dist_tensor_1.equal(dist_tensor_3))
self.assertTrue(dist_tensor_1.is_same_size(dist_tensor_3))
# test sharding difference with only some shards content difference
self.assertFalse(dist_tensor_2.equal(dist_tensor_3))
self.assertTrue(dist_tensor_1.is_same_size(dist_tensor_3))
self.assertFalse(input_tensor_2.is_same_size(dist_tensor_3))
|
import torch
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import Partial, Replicate, Shard
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorConverter,
DTensorTestBase,
with_comms,
)
class DistTensorOpsTest(DTensorTestBase):
from torch.distributed.tensor._ops._embedding_ops import _MaskPartial
from torch.distributed.tensor.debug import _get_sharding_prop_cache_info
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_random_ops.py
|
test_meta_tensor_init
|
def test_meta_tensor_init(self):
    # The test suite sets each rank's seed to the same value, but in actual
# execution the default random seed will be different (a random value).
# The DTensor random ops will use the same random seed even though the
# torch random generator keeps different seeds on ranks. This ensures
# that Replicate DTensor will have the same initialized results
# across ranks.
torch.cuda.manual_seed(self.rank)
device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
size = [1024, 2048]
meta_dtensor = distribute_tensor(
torch.empty(*size, device="meta"), device_mesh, [Replicate()]
)
self.assertTrue(meta_dtensor.is_meta)
dtensor = torch.empty_like(meta_dtensor, device=self.device_type)
# disable the distribute region for RNG
random._rng_tracker.distribute_region_enabled = False
dtensor.uniform_()
# allgather the local tensors
local_tensor = funcol.all_gather_tensor(
dtensor.to_local(), gather_dim=0, group=(device_mesh, 0)
)
# compare with local tensors from other ranks
self_slice = slice(1024 * self.rank, 1024 * self.rank + 1024)
for other_rank in range(self.world_size):
        # the RNG result on each rank differs even though they're supposed
        # to be replicated
if self.rank != other_rank:
other_slice = slice(1024 * other_rank, 1024 * other_rank + 1024)
self.assertNotEqual(
local_tensor[self_slice, :], local_tensor[other_slice, :]
)
# enable the distribute region for RNG
random._rng_tracker.distribute_region_enabled = True
self.assertTrue(meta_dtensor.is_meta)
dtensor = torch.empty_like(meta_dtensor, device=self.device_type)
dtensor.uniform_()
# allgather the local tensors
local_tensor = funcol.all_gather_tensor(
dtensor.to_local(), gather_dim=0, group=(device_mesh, 0)
)
# compare with local tensors from other ranks
for other_rank in range(self.world_size):
        # the RNG results on each rank are the same because they're replicated
if self.rank != other_rank:
# other rank should have an identical local tensor
other_slice = slice(1024 * other_rank, 1024 * other_rank + 1024)
self.assertEqual(
local_tensor[self_slice, :], local_tensor[other_slice, :]
)
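# A tiny standalone sketch (plain torch generators, no DTensor) of the seed
# behavior this test exercises: with per-rank seeds and the distribute region
# disabled, "replicated" init diverges across ranks; identical seeds agree.
import torch

g0, g1 = torch.Generator().manual_seed(0), torch.Generator().manual_seed(1)
assert not torch.equal(torch.rand(4, generator=g0), torch.rand(4, generator=g1))
g0, g1 = torch.Generator().manual_seed(0), torch.Generator().manual_seed(0)
assert torch.equal(torch.rand(4, generator=g0), torch.rand(4, generator=g1))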
|
import itertools
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor._utils import compute_local_shape_and_global_offset
from torch.distributed._tensor.api import distribute_tensor
from torch.distributed._tensor.placement_types import Replicate, Shard
from torch.distributed.distributed_c10d import broadcast_object_list
from torch.distributed.tensor._random import is_rng_supported_mesh, manual_seed
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
skip_unless_torch_gpu,
with_comms,
)
class DistTensorRandomOpTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_xla_integration.py
|
text_xla_distribute_module
|
def text_xla_distribute_module(self):
import torch_xla # type:ignore[import]
import torch_xla.core.xla_model as xm # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
model = self.SimpleLinear().to(xm.xla_device())
device_count = xr.global_runtime_device_count()
device_mesh = DeviceMesh("xla", list(range(device_count)))
def shard_params(mod_name, mod, mesh):
shard_spec = [Shard(0)]
        # annotate fc1 and fc2
if isinstance(mod, nn.Linear):
for name, param in mod.named_parameters():
# annotate the parameter tensors directly
distribute_tensor(param, mesh, shard_spec)
sharded_model = distribute_module(model, device_mesh, shard_params)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc1.weight) != ""
)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc2.weight) != ""
)
|
import os
import unittest
from functools import wraps
from typing import Any, Callable, Dict, Tuple
import numpy as np
import torch
from torch import nn
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests, TestCase
import torch_xla # type:ignore[import] # noqa: F401
class DTensorXLAIntegrationTest(TestCase):
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla # type:ignore[import]
import torch_xla.core.xla_model as xm # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_xla_integration.py
|
shard_params
|
def shard_params(mod_name, mod, mesh):
shard_spec = [Shard(0)]
    # annotate fc1 and fc2
if isinstance(mod, nn.Linear):
for name, param in mod.named_parameters():
# annotate the parameter tensors directly
distribute_tensor(param, mesh, shard_spec)
sharded_model = distribute_module(model, device_mesh, shard_params)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc1.weight) != ""
)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc2.weight) != ""
)
|
import os
import unittest
from functools import wraps
from typing import Any, Callable, Dict, Tuple
import numpy as np
import torch
from torch import nn
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests, TestCase
import torch_xla # type:ignore[import] # noqa: F401
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla # type:ignore[import]
import torch_xla.core.xla_model as xm # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_fsdp2_mem_tracker.py
|
_init_cublas_workspace
|
def _init_cublas_workspace(dev: torch.device):
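    # Run one small GEMM forward/backward up front so cuBLAS allocates its
    # workspace now rather than during the measured region; otherwise that
    # one-time allocation would skew the peak-memory comparisons below.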
lin = torch.nn.Linear(768, 768, device=dev)
inp = torch.randn(1, 768, device=dev)
lin(inp).sum().backward()
del lin
del inp
|
import functools
import gc
from typing import Union
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
)
from torch.distributed._tensor import init_device_mesh
from torch.distributed._tools.fsdp2_mem_tracker import FSDPMemTracker
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
CheckpointWrapper,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
TransformerBlock,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_fsdp2_mem_tracker.py
|
_reset_mem_stats
|
def _reset_mem_stats(dev: torch.device):
torch.cuda.empty_cache()
torch.cuda.reset_accumulated_memory_stats(dev)
torch.cuda.reset_peak_memory_stats(dev)
class TestTrackerFullyShard1DTrainingCore(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.cuda.device_count())
@skip_if_lt_x_gpu(2)
def test_tracker_multi_group_eager(self):
"""
        Tests tracker accuracy when using multiple parameter groups for
        communication (to overlap communication with computation and reduce memory)
        and different mixed precision policies.
"""
self.run_subtests(
{
"reshard_after_forward": [True, False],
"offload_policy": [
CPUOffloadPolicy(pin_memory=False),
OffloadPolicy(),
],
"mp_policy": [
MixedPrecisionPolicy(
param_dtype=torch.float16, reduce_dtype=torch.float32
),
],
},
self._test_tracker_multi_group,
)
def _test_tracker_multi_group(
self,
reshard_after_forward: Union[bool, int],
offload_policy: OffloadPolicy,
mp_policy: MixedPrecisionPolicy,
):
debug = False
dev = torch.device(torch.cuda.current_device())
_init_cublas_workspace(dev)
gc.collect()
_reset_mem_stats(dev)
mem_stats = torch.cuda.memory_stats(dev)
pre_cuda_active = mem_stats["active_bytes.all.current"]
torch.manual_seed(42)
lin_dim, bsz = 2048, 8192
with torch.device(dev):
model = nn.Sequential(*[MLP(dim=lin_dim, device=dev) for _ in range(4)])
mesh = init_device_mesh("cuda", (self.world_size,))
fully_shard_fn = functools.partial(
fully_shard,
mesh=mesh,
reshard_after_forward=reshard_after_forward,
offload_policy=offload_policy,
mp_policy=mp_policy,
)
for mlp in model:
fully_shard_fn(mlp)
fully_shard_fn(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
inp = torch.randn((bsz, lin_dim), device=dev)
fmt = FSDPMemTracker(model, optim)
fmt.track_inputs((inp,))
with fmt:
for iter_idx in range(2):
loss = model(inp).sum()
loss.backward()
optim.step()
optim.zero_grad()
if iter_idx == 0:
fmt.reset_mod_stats()
mem_stats = torch.cuda.memory_stats()
tracker_max = fmt.get_tracker_snapshot("peak")[dev]["Total"]
cuda_max = mem_stats["active_bytes.all.peak"] - pre_cuda_active
accuracy = tracker_max / cuda_max
if self.rank == 0 and debug:
print(f"Accuracy: {accuracy} Tracker Max:{tracker_max} CUDA Max:{cuda_max}")
self.assertAlmostEqual(
accuracy,
1.0,
delta=0.1,
msg=f"Tracker Max:{tracker_max} CUDA Max:{cuda_max}",
)
del model
del inp
del optim
@skip_if_lt_x_gpu(2)
def test_tracker_non_root_forward_backward(self):
"""
        Tests tracker accuracy when running forward/backward through a non-root.
"""
debug = False
dev = torch.device(torch.cuda.current_device())
_init_cublas_workspace(dev)
gc.collect()
_reset_mem_stats(dev)
mem_stats = torch.cuda.memory_stats(dev)
pre_cuda_active = mem_stats["active_bytes.all.current"]
torch.manual_seed(42)
lin_dim, bsz = 2048, 8
model = nn.Sequential(*[MLP(lin_dim, dev) for _ in range(3)])
for mlp in model:
fully_shard(mlp)
fully_shard(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
torch.manual_seed(42 + self.rank)
inp = torch.randn((bsz, lin_dim), device=dev)
fmt = FSDPMemTracker(model, optim)
fmt.track_inputs((inp,))
with fmt:
for iter_idx in range(2):
nonroot_loss = model[0](inp).sum()
nonroot_loss.backward()
optim.step()
optim.zero_grad()
if iter_idx == 0:
fmt.reset_mod_stats()
mem_stats = torch.cuda.memory_stats()
tracker_max = fmt.get_tracker_snapshot("peak")[dev]["Total"]
cuda_max = mem_stats["active_bytes.all.peak"] - pre_cuda_active
accuracy = tracker_max / cuda_max
if self.rank == 0 and debug:
print(f"Accuracy: {accuracy} Tracker Max:{tracker_max} CUDA Max:{cuda_max}")
self.assertAlmostEqual(
accuracy,
1.0,
delta=0.1,
msg=f"Tracker Max:{tracker_max} CUDA Max:{cuda_max}",
)
del inp
del model
del optim
class TestTrackerFullyShard1DTrainingCompose(FSDPTest):
@property
def world_size(self) -> int:
return min(torch.cuda.device_count(), 4)
@skip_if_lt_x_gpu(2)
def test_tracker_with_activation_checkpointing(self):
"""
Tests tracker accuracy when composing with activation checkpointing.
"""
self.run_subtests(
{
"reshard_after_forward": [True, False],
"checkpoint_impl": ["composable", "wrapper"],
},
self._test_tracker_with_activation_checkpointing,
)
def _test_tracker_with_activation_checkpointing(
self, reshard_after_forward: Union[bool, int], checkpoint_impl: str
):
assert checkpoint_impl in ("composable", "wrapper")
debug = False
dev = torch.device(torch.cuda.current_device())
_init_cublas_workspace(dev)
gc.collect()
_reset_mem_stats(dev)
mem_stats = torch.cuda.memory_stats(dev)
pre_cuda_active = mem_stats["active_bytes.all.current"]
torch.manual_seed(42)
vocab_size = 8192
bsz, seq_len = 16, 512
with torch.device(dev):
model_args = ModelArgs(
n_layers=4,
n_heads=4,
vocab_size=vocab_size,
max_seq_len=seq_len,
dropout_p=0.1,
)
model = Transformer(model_args)
foreach = False
fully_shard_fn = functools.partial(
fully_shard,
reshard_after_forward=reshard_after_forward,
)
if checkpoint_impl == "wrapper":
apply_activation_checkpointing(
model, check_fn=lambda m: isinstance(m, TransformerBlock)
)
for module in model.modules():
# Apply to `CheckpointWrapper`, which wraps `TransformerBlock`
if isinstance(module, CheckpointWrapper):
fully_shard_fn(module)
else:
for module in model.modules():
if isinstance(module, TransformerBlock):
if checkpoint_impl == "composable":
checkpoint(module)
fully_shard_fn(module)
fully_shard_fn(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=foreach)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, vocab_size, (bsz, seq_len), device=dev)
fmt = FSDPMemTracker(model, optim)
fmt.track_inputs((inp,))
with fmt:
for iter_idx in range(2):
loss = model(inp).sum()
loss.backward()
optim.step()
optim.zero_grad()
if iter_idx == 0:
fmt.reset_mod_stats()
mem_stats = torch.cuda.memory_stats()
tracker_max = fmt.get_tracker_snapshot("peak")[dev]["Total"]
cuda_max = mem_stats["active_bytes.all.peak"] - pre_cuda_active
accuracy = tracker_max / cuda_max
if self.rank == 0 and debug:
print(f"Accuracy: {accuracy} Tracker Max:{tracker_max} CUDA Max:{cuda_max}")
self.assertAlmostEqual(
accuracy,
1.0,
delta=0.1,
msg=f"Tracker Max:{tracker_max} CUDA Max:{cuda_max}",
)
del inp
del model
del optim
if __name__ == "__main__":
run_tests()
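# A standalone sketch (requires a CUDA device) of the peak-measurement pattern
# shared by the tests above: record currently-active bytes before the region,
# then subtract that baseline from the peak observed afterwards.
import torch

dev = torch.device("cuda")
torch.cuda.reset_peak_memory_stats(dev)
pre_active = torch.cuda.memory_stats(dev)["active_bytes.all.current"]
workload = torch.empty(1024, 1024, device=dev)  # stands in for fwd/bwd/optimizer step
region_peak = torch.cuda.memory_stats(dev)["active_bytes.all.peak"] - pre_active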
|
import functools
import gc
from typing import Union
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
)
from torch.distributed._tensor import init_device_mesh
from torch.distributed._tools.fsdp2_mem_tracker import FSDPMemTracker
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
CheckpointWrapper,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
TransformerBlock,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_fsdp2_mem_tracker.py
|
test_tracker_multi_group_eager
|
def test_tracker_multi_group_eager(self):
"""
    Tests tracker accuracy when using multiple parameter groups for
    communication (to overlap communication with computation and reduce memory)
    and different mixed precision policies.
"""
self.run_subtests(
{
"reshard_after_forward": [True, False],
"offload_policy": [
CPUOffloadPolicy(pin_memory=False),
OffloadPolicy(),
],
"mp_policy": [
MixedPrecisionPolicy(
param_dtype=torch.float16, reduce_dtype=torch.float32
),
],
},
self._test_tracker_multi_group,
)
|
import functools
import gc
from typing import Union
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
)
from torch.distributed._tensor import init_device_mesh
from torch.distributed._tools.fsdp2_mem_tracker import FSDPMemTracker
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
CheckpointWrapper,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
TransformerBlock,
)
class TestTrackerFullyShard1DTrainingCore(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_fsdp2_mem_tracker.py
|
test_tracker_non_root_forward_backward
|
def test_tracker_non_root_forward_backward(self):
"""
    Tests tracker accuracy when running forward/backward through a non-root.
"""
debug = False
dev = torch.device(torch.cuda.current_device())
_init_cublas_workspace(dev)
gc.collect()
_reset_mem_stats(dev)
mem_stats = torch.cuda.memory_stats(dev)
pre_cuda_active = mem_stats["active_bytes.all.current"]
torch.manual_seed(42)
lin_dim, bsz = 2048, 8
model = nn.Sequential(*[MLP(lin_dim, dev) for _ in range(3)])
for mlp in model:
fully_shard(mlp)
fully_shard(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
torch.manual_seed(42 + self.rank)
inp = torch.randn((bsz, lin_dim), device=dev)
fmt = FSDPMemTracker(model, optim)
fmt.track_inputs((inp,))
with fmt:
for iter_idx in range(2):
nonroot_loss = model[0](inp).sum()
nonroot_loss.backward()
optim.step()
optim.zero_grad()
if iter_idx == 0:
fmt.reset_mod_stats()
mem_stats = torch.cuda.memory_stats()
tracker_max = fmt.get_tracker_snapshot("peak")[dev]["Total"]
cuda_max = mem_stats["active_bytes.all.peak"] - pre_cuda_active
accuracy = tracker_max / cuda_max
if self.rank == 0 and debug:
print(f"Accuracy: {accuracy} Tracker Max:{tracker_max} CUDA Max:{cuda_max}")
self.assertAlmostEqual(
accuracy,
1.0,
delta=0.1,
msg=f"Tracker Max:{tracker_max} CUDA Max:{cuda_max}",
)
del inp
del model
del optim
|
import functools
import gc
from typing import Union
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
)
from torch.distributed._tensor import init_device_mesh
from torch.distributed._tools.fsdp2_mem_tracker import FSDPMemTracker
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
CheckpointWrapper,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
TransformerBlock,
)
class TestTrackerFullyShard1DTrainingCore(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_fsdp2_mem_tracker.py
|
test_tracker_with_activation_checkpointing
|
def test_tracker_with_activation_checkpointing(self):
"""
Tests tracker accuracy when composing with activation checkpointing.
"""
self.run_subtests(
{
"reshard_after_forward": [True, False],
"checkpoint_impl": ["composable", "wrapper"],
},
self._test_tracker_with_activation_checkpointing,
)
|
import functools
import gc
from typing import Union
import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
)
from torch.distributed._tensor import init_device_mesh
from torch.distributed._tools.fsdp2_mem_tracker import FSDPMemTracker
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
CheckpointWrapper,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, MLP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
TransformerBlock,
)
class TestTrackerFullyShard1DTrainingCompose(FSDPTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
_init_cublas_workspace
|
def _init_cublas_workspace(self, dev: torch.device):
lin = torch.nn.Linear(768, 768, device=dev)
inp = torch.randn(1, 768, device=dev)
lin(inp).sum().backward()
del lin
del inp
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class TestMemTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
__init__
|
def __init__(self, n_layers: int, dim: int, dtype: torch.dtype):
super().__init__()
self.linears = nn.ModuleList()
for _ in range(n_layers):
self.linears.append(nn.Linear(dim, dim, dtype=dtype))
self.linears.append(nn.ReLU())
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
forward
|
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
__init__
|
def __init__(self, n_layers: int, dim: int, dtype: torch.dtype):
super().__init__()
self.linears = nn.ModuleList()
for _ in range(n_layers):
self.linears.append(nn.Linear(dim, dim, dtype=dtype))
self.linears.append(nn.ReLU())
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
forward
|
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
forward
|
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
test_tracker_attribution
|
def test_tracker_attribution(self):
"""
Tests that the tracker correctly categorizes params, gradients, and optimizer states.
"""
dev = torch.device(torch.get_default_device())
gc.collect(1)
bsz, n_layers, dim, dtype = 16, 3, 128, torch.float32
def get_param_grad_optstate_actual_bytes(
model: nn.Module, opt: torch.optim.Optimizer
) -> Tuple[int, int, int]:
param_bytes = 0
grad_bytes = 0
opt_state_bytes = 0
for param in model.parameters():
if param.device == dev:
param_bytes += param.numel() * param.element_size()
if param.grad is not None and param.grad.device == dev:
grad_bytes += param.grad.numel() * param.grad.element_size()
for state in opt.state.values():
for v in state.values():
if isinstance(v, torch.Tensor) and v.device == dev:
opt_state_bytes += v.numel() * v.element_size()
return param_bytes, grad_bytes, opt_state_bytes
def get_param_grad_optstate_bytes_from_tracker(
tracker: MemTracker,
) -> Tuple[int, int, int]:
snapshot = tracker.get_tracker_snapshot()
param_bytes = snapshot[dev]["Parameter"]
grad_bytes = snapshot[dev]["Gradient"]
opt_state_bytes = snapshot[dev]["Optstate"]
return param_bytes, grad_bytes, opt_state_bytes
def test_attribution_equivalence(
mt: MemTracker,
model: nn.Module,
opt: torch.optim.Optimizer,
) -> None:
actual = get_param_grad_optstate_actual_bytes(model, opt)
tracker = get_param_grad_optstate_bytes_from_tracker(mt)
for a, b in zip(actual, tracker):
if a == 0:
self.assertEqual(b, 0)
else:
self.assertAlmostEqual(b / a, 1.0, delta=0.1)
class DummyModel(nn.Module):
def __init__(self, n_layers: int, dim: int, dtype: torch.dtype):
super().__init__()
self.MLP_layers = nn.ModuleList()
for _ in range(n_layers):
self.MLP_layers.extend(
[nn.Linear(dim, 2 * dim, dtype=dtype), nn.GELU()]
)
self.MLP_layers.extend(
[nn.Linear(2 * dim, dim, dtype=dtype), nn.GELU()]
)
def forward(self, x):
for layer in self.MLP_layers:
x = layer(x)
return x
with torch.device(dev):
model = DummyModel(n_layers, dim, dtype=dtype)
optim = torch.optim.Adam(model.parameters(), foreach=True)
mem_tracker = MemTracker()
mem_tracker.track_external(model, optim)
with mem_tracker as mt:
input_batch = torch.randn(bsz, dim, device=dev, dtype=dtype)
# Before forward: Only parameters and input are allocated
test_attribution_equivalence(mt, model, optim)
output = model(input_batch)
output.sum().backward()
# After backward: Gradients are allocated
test_attribution_equivalence(mt, model, optim)
output = None
optim.step()
# After step: Optimizer state is allocated
test_attribution_equivalence(mt, model, optim)
optim.zero_grad()
# After zero_grad: Gradients are deallocated
test_attribution_equivalence(mt, model, optim)
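# A quick arithmetic check of the byte accounting used above (illustration
# only): a float32 nn.Linear(128, 128) holds a 128x128 weight plus a 128 bias.
import torch
import torch.nn as nn

lin = nn.Linear(128, 128, dtype=torch.float32)
expected_bytes = (128 * 128 + 128) * 4  # 65_536 + 512 = 66_048
assert sum(p.numel() * p.element_size() for p in lin.parameters()) == expected_bytes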
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class TestMemTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
__init__
|
def __init__(self, n_layers: int, dim: int, dtype: torch.dtype):
super().__init__()
self.linears = nn.ModuleList()
for _ in range(n_layers):
self.linears.append(nn.Linear(dim, dim, dtype=dtype))
self.linears.append(nn.ReLU())
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mem_tracker.py
|
forward
|
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
|
import gc
import unittest
from typing import Tuple
import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.utils.checkpoint import checkpoint
class DummyModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
test_module_hierarchy
|
def test_module_hierarchy(self):
seen_fw = []
seen_bw = []
class Foo(torch.nn.Module):
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = Foo()
self.b = torch.nn.ModuleDict({"nest": Foo()})
self.c = torch.nn.ModuleList([Foo()])
def forward(self, x):
x = self.c[0](x)
return self.b["nest"](self.a(x))
mod = Mod()
with ModTracker() as tracker:
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
self.assertEqual(
seen_fw,
[
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
],
)
self.assertEqual(
seen_bw,
[
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
],
)
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class TestModTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class Foo(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class Foo(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class Foo(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
hook
|
def hook(mod, hook_name):
mfqn = mt.get_known_fqn(mod) if mod is not None else None
test_op.append((hook_name, mfqn, mfqn in mt.parents, mt.is_bw))
mod = Bar()
mt.register_user_hooks(
lambda m, inp: hook(m, "pre_fw"),
lambda m, inp, op: hook(m, "post_fw"),
lambda m, gop: hook(m, "pre_bw"),
lambda m, ginp: hook(m, "post_bw"),
)
with mt:
mod(torch.rand(10, 10, requires_grad=True)).sum().backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", "Bar", True, True),
("pre_bw", "Bar.foo", True, True),
("post_bw", "Bar", True, True),
("post_bw", "Bar.foo", True, True),
]
self.assertEqual(test_op, expected_op)
with self.assertRaises(AssertionError):
mt.register_user_hooks(lambda x, y: x, None, None, None)
test_op.clear()
with mt:
loss = mod(torch.rand(10, 10, requires_grad=True)).sum()
del mod
loss.backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", None, False, True),
("pre_bw", None, False, True),
("post_bw", None, False, True),
("post_bw", None, False, True),
]
self.assertEqual(test_op, expected_op)
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
test_ac
|
def test_ac(self):
class Foo(torch.nn.Module):
def __init__(self, n_layers: int, dim: int, use_ac: bool = False):
super().__init__()
self.linears = torch.nn.ModuleList()
self.use_ac = use_ac
for _ in range(n_layers):
self.linears.append(torch.nn.Linear(dim, dim))
def forward(self, x):
for i, block in enumerate(self.linears):
if i >= 1 and self.use_ac:
x = checkpoint(
block, x, preserve_rng_state=True, use_reentrant=False
)
else:
x = block(x)
assert x is not None
x = torch.nn.functional.relu(x)
return x
bsz = 2
dim = 8
n_layers = 2
test_op = []
def hook(mod, mt, hook_name):
mfqn = mt.get_known_fqn(mod) if mod is not None else None
test_op.append((hook_name, mfqn, mfqn in mt.parents, mt.is_bw))
mt = ModTracker()
mt.register_user_hooks(
lambda m, i: hook(m, mt, "pre_fw"),
lambda m, i, o: hook(m, mt, "post_fw"),
lambda m, go: hook(m, mt, "pre_bw"),
lambda m, gi: hook(m, mt, "post_bw"),
)
model = Foo(n_layers, dim, True)
x = torch.randn(bsz, dim)
with mt:
model(x).sum().backward()
expected_op = [
("pre_fw", "Foo", True, False),
("pre_fw", "Foo.linears.0", True, False),
("post_fw", "Foo.linears.0", True, False),
("pre_fw", "Foo.linears.1", True, False),
("post_fw", "Foo.linears.1", True, False),
("post_fw", "Foo", True, False),
("pre_bw", "Foo", True, True),
("pre_bw", "Foo.linears.1", True, True),
("pre_fw", "Foo.linears.1", True, True),
("post_fw", "Foo.linears.1", True, True),
("post_bw", "Foo.linears.1", True, True),
("pre_bw", "Foo.linears.0", True, True),
]
self.assertEqual(test_op, expected_op)
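# A standalone sketch of why the forward hooks fire again with is_bw=True above:
# torch.utils.checkpoint re-runs the wrapped function during backward to rebuild
# the activations it dropped in forward.
import torch
from torch.utils.checkpoint import checkpoint

calls = []

def block(x):
    calls.append("fw")
    return x.relu()

inp = torch.randn(2, 2, requires_grad=True)
checkpoint(block, inp, use_reentrant=False).sum().backward()
assert calls == ["fw", "fw"]  # once in forward, once recomputed in backward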
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class TestModTracker(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
__init__
|
def __init__(self) -> None:
super().__init__()
self.a = Foo()
self.b = torch.nn.ModuleDict({"nest": Foo()})
self.c = torch.nn.ModuleList([Foo()])
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class Mod(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
forward
|
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
class Foo(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_mod_tracker.py
|
hook
|
def hook(mod, hook_name):
mfqn = mt.get_known_fqn(mod) if mod is not None else None
test_op.append((hook_name, mfqn, mfqn in mt.parents, mt.is_bw))
mod = Bar()
mt.register_user_hooks(
lambda m, inp: hook(m, "pre_fw"),
lambda m, inp, op: hook(m, "post_fw"),
lambda m, gop: hook(m, "pre_bw"),
lambda m, ginp: hook(m, "post_bw"),
)
with mt:
mod(torch.rand(10, 10, requires_grad=True)).sum().backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", "Bar", True, True),
("pre_bw", "Bar.foo", True, True),
("post_bw", "Bar", True, True),
("post_bw", "Bar.foo", True, True),
]
self.assertEqual(test_op, expected_op)
with self.assertRaises(AssertionError):
mt.register_user_hooks(lambda x, y: x, None, None, None)
test_op.clear()
with mt:
loss = mod(torch.rand(10, 10, requires_grad=True)).sum()
del mod
loss.backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", None, False, True),
("pre_bw", None, False, True),
("post_bw", None, False, True),
("post_bw", None, False, True),
]
self.assertEqual(test_op, expected_op)
|
from copy import copy
import torch
from torch.distributed._tools.mod_tracker import ModTracker
from torch.testing._internal.common_utils import run_tests, TestCase, xfailIfTorchDynamo
from torch.utils.checkpoint import checkpoint
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_runtime_estimator.py
|
__init__
|
def __init__(self, conv_args: ConvArgs):
super().__init__()
image_size = conv_args.image_size
num_classes = conv_args.num_classes
self.image_size = image_size
self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.conv4 = nn.Conv2d(128, 256, kernel_size=3)
self.fc1_size = self._calculate_fc1_size()
self.fc1 = nn.Linear(self.fc1_size, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, num_classes)
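# One common way a helper like _calculate_fc1_size is implemented (an assumption;
# the helper's body is not shown here): trace a dummy input through the conv/pool
# stack and read off the flattened feature count.
import torch
import torch.nn as nn

def infer_flat_features(conv_stack: nn.Module, image_size: int) -> int:
    with torch.no_grad():
        dummy = torch.zeros(1, 3, image_size, image_size)
        return conv_stack(dummy).flatten(1).shape[1]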
|
import unittest
from dataclasses import dataclass
from typing import Any, Callable, cast, Tuple, Union
import torch
from torch import nn, optim
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools.runtime_estimator import RuntimeEstimator
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
)
class SimpleCNN(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tensor/test_view_ops.py
|
call_dt_test
|
def call_dt_test(self, op, args, kwargs, device_mesh: DeviceMesh):
spec = ops[op]
rules = spec.dim_map(*args, **kwargs)
outputs = op(*args, **kwargs)
flat_args, _ = tree_flatten(args)
in_shape = flat_args[0].shape
no_shard_dims = set()
for rule in rules:
if isinstance(rule, Repeat):
if isinstance(rule.input_dim, InputDim):
no_shard_dims.add(rule.input_dim.input_dim)
elif isinstance(rule, Flatten):
for dim in rule.input_dims[1:]:
if isinstance(dim, InputDim):
no_shard_dims.add(dim.input_dim)
elif isinstance(rule, Split):
if isinstance(rule.input_dim, Flatten):
for dim in rule.input_dim.input_dims[1:]:
if isinstance(dim, InputDim):
no_shard_dims.add(dim.input_dim)
if op == torch.unbind:
no_shard_dims.add(kwargs.get("dim", 0))
sharding_choices = cast(List[Placement], [Replicate()]) + [
Shard(i) for i, s in enumerate(in_shape) if s > 1 and i not in no_shard_dims
]
all_sharding_choices = itertools.product(
*(device_mesh.ndim * [sharding_choices])
)
for in_shard in all_sharding_choices:
# print(f' |--- {in_shard}')
in_dt = distribute_tensor(args[0], device_mesh, in_shard)
with redistribute_profiler() as profiler:
out_dt = op(in_dt, *args[1:], **kwargs)
self.assertEqual(profiler.num_calls, 0, "Expected no redistribution.")
full_out = out_dt.redistribute(
device_mesh, device_mesh.ndim * [Replicate()]
).to_local()
if dist.get_rank() == 0:
self.assertEqual(outputs, full_out)
|
def call_dt_test(self, op, args, kwargs, device_mesh: DeviceMesh):
dim_map = dim_maps[op]
rules = dim_map(*args, **kwargs)
outputs = op(*args, **kwargs)
flat_args = pytree.arg_tree_leaves(*args)
in_shape = flat_args[0].shape
no_shard_dims = set()
for rule in rules:
if isinstance(rule, Repeat):
if isinstance(rule.input_dim, InputDim):
no_shard_dims.add(rule.input_dim.input_dim)
elif isinstance(rule, Flatten):
for dim in rule.input_dims[1:]:
if isinstance(dim, InputDim):
no_shard_dims.add(dim.input_dim)
elif isinstance(rule, Split):
if isinstance(rule.input_dim, Flatten):
for dim in rule.input_dim.input_dims[1:]:
if isinstance(dim, InputDim):
no_shard_dims.add(dim.input_dim)
if op == torch.unbind:
no_shard_dims.add(kwargs.get("dim", 0))
sharding_choices = cast(List[Placement], [Replicate()]) + [
Shard(i) for i, s in enumerate(in_shape) if s > 1 and i not in no_shard_dims
]
all_sharding_choices = itertools.product(
*(device_mesh.ndim * [sharding_choices])
)
for in_shard in all_sharding_choices:
in_dt = distribute_tensor(args[0], device_mesh, in_shard)
comm_mode = CommDebugMode()
with comm_mode:
out_dt = op(in_dt, *args[1:], **kwargs)
self.assertEqual(
comm_mode.get_total_counts(), 0, "Expected no redistribution."
)
full_out = out_dt.full_tensor()
if dist.get_rank() == 0:
self.assertEqual(outputs, full_out)
|
import itertools
from typing import cast, List
import torch
import torch.distributed as dist
from torch import rand, randn, Tensor
from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
from torch.distributed._tensor.ops.view_ops import (
Broadcast,
Flatten,
InputDim,
ops,
Repeat,
Singleton,
Split,
view_groups,
)
from torch.distributed._tensor.placement_types import Placement
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
redistribute_profiler,
with_comms,
)
from torch.utils._pytree import tree_flatten
class TestViewOps(DTensorTestBase):
|
import itertools
from typing import cast, List
import torch
import torch.distributed as dist
from torch import rand, randn, Tensor
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed._tensor.placement_types import Placement
from torch.distributed.tensor._ops._view_ops import (
Broadcast,
dim_maps,
Flatten,
InputDim,
Repeat,
Singleton,
Split,
view_groups,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
from torch.utils import _pytree as pytree
class TestViewOps(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/_tensor/test_view_ops.py
|
test_dtensor_view_op_uneven
|
if __name__ == "__main__":
run_tests()
|
def test_dtensor_view_op_uneven(self):
"""
Test two uneven cases for view op:
    1) the sharded tensor dim is 1 so that only the first rank has a non-empty shard.
2) the sharded tensor dim is uneven such that some ranks have full shards,
smaller non-empty shards, and empty shards.
"""
dim0_sizes = [1, self.world_size + 1]
for dim0_size in dim0_sizes:
p = torch.randn(dim0_size, 2, 2, 2)
mesh = init_device_mesh(self.device_type, (self.world_size,))
dtensor = distribute_tensor(p, mesh, [Shard(0)])
with CommDebugMode() as comm_mode:
view = dtensor.view(dim0_size, 2, 4)
self.assertEqual(len(comm_mode.get_comm_counts()), 0)
# when no communication happens, the data pointer should be the same.
self.assertEqual(
view.to_local().data_ptr(), dtensor.to_local().data_ptr()
)
view = dtensor.view(dim0_size, 4, 2)
self.assertEqual(
view.to_local().data_ptr(), dtensor.to_local().data_ptr()
)
self.assertEqual(len(comm_mode.get_comm_counts()), 0)
view = dtensor.view(dim0_size, 8)
self.assertEqual(
view.to_local().data_ptr(), dtensor.to_local().data_ptr()
)
self.assertEqual(len(comm_mode.get_comm_counts()), 0)
view = dtensor.view(dtensor.shape)
self.assertEqual(
view.to_local().data_ptr(), dtensor.to_local().data_ptr()
)
self.assertEqual(len(comm_mode.get_comm_counts()), 0)
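# A local-tensor sketch of the zero-copy property asserted above: a metadata-only
# view shares storage with its source, so the data pointers match.
import torch

t = torch.randn(2, 2, 2)
assert t.view(2, 4).data_ptr() == t.data_ptr()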
|
import itertools
from typing import cast, List
import torch
import torch.distributed as dist
from torch import rand, randn, Tensor
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed._tensor.placement_types import Placement
from torch.distributed.tensor._ops._view_ops import (
Broadcast,
dim_maps,
Flatten,
InputDim,
Repeat,
Singleton,
Split,
view_groups,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
from torch.utils import _pytree as pytree
class TestViewOps(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/_tensor/test_xla_integration.py
|
forward
|
def forward(self, x):
y = self.relu(self.fc1(x))
z = self.fc2(y)
return z
|
import os
import unittest
from functools import wraps
from typing import Any, Callable, Dict, Tuple
import numpy as np
import torch
from torch import nn
from torch.distributed._tensor import (
DeviceMesh,
distribute_module,
distribute_tensor,
Replicate,
Shard,
)
from torch.testing._internal.common_utils import run_tests, TestCase
import torch_xla # type:ignore[import] # noqa: F401
class SimpleLinear(nn.Module):
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
import torch_xla # type:ignore[import]
import torch_xla.core.xla_model as xm # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
state_dict
|
def state_dict(self):
return {"data": self.data}
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestStatefulObj:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
load_state_dict
|
def load_state_dict(self, state_dict):
self.data = state_dict["data"]
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestStatefulObj:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
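The two TestStatefulObj methods recorded above are exactly what makes an arbitrary object checkpointable: torch.distributed.checkpoint treats any value exposing state_dict/load_state_dict as stateful and calls those methods during save and load. A minimal, hypothetical single-process sketch of that round trip (the Counter class and ckpt_dir are illustrative stand-ins, not part of the test suite; no process group is initialized, relying on DCP's non-distributed fallback shown by test_no_dist later in this file):

import tempfile

import torch
import torch.distributed.checkpoint as DCP


class Counter:
    # Illustrative stand-in for TestStatefulObj.
    def __init__(self) -> None:
        self.data = torch.zeros(3)

    def state_dict(self):
        return {"data": self.data}

    def load_state_dict(self, state_dict):
        self.data = state_dict["data"]


ckpt_dir = tempfile.mkdtemp()
src, dst = Counter(), Counter()
src.data += 1.0

# DCP calls src.state_dict() during save and dst.load_state_dict() during load.
DCP.save({"s": src}, checkpoint_id=ckpt_dir)
DCP.load({"s": dst}, checkpoint_id=ckpt_dir)
assert torch.equal(src.data, dst.data)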
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
test_overwrite
|
def test_overwrite(self):
t1, t2 = torch.randn(10), torch.randn(10)
DCP.save({"random": t1}, checkpoint_id=self.temp_dir)
DCP.save(
{"random": t2},
storage_writer=DCP.FileSystemWriter(self.temp_dir, overwrite=True),
)
sd = {"random": torch.zeros(10)}
DCP.load(sd, checkpoint_id=self.temp_dir)
self.assertTrue(torch.allclose(sd["random"], t2))
with self.assertRaisesRegex(
CheckpointException, ".*Checkpoint already exists.*"
):
DCP.save(
{"random": t2},
storage_writer=DCP.FileSystemWriter(self.temp_dir, overwrite=False),
)
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
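A minimal, hypothetical sketch of the overwrite behavior that test_overwrite above exercises, reduced to a single process with no process group; the directory and tensors are illustrative, and FileSystemWriter(overwrite=...) is assumed to be available in the installed torch.distributed.checkpoint version:

import tempfile

import torch
import torch.distributed.checkpoint as DCP
from torch.distributed.checkpoint.utils import CheckpointException

ckpt_dir = tempfile.mkdtemp()

# First save creates the checkpoint.
DCP.save({"random": torch.randn(10)}, checkpoint_id=ckpt_dir)

# Re-saving to the same directory is allowed with overwrite=True ...
DCP.save(
    {"random": torch.randn(10)},
    storage_writer=DCP.FileSystemWriter(ckpt_dir, overwrite=True),
)

# ... and rejected with overwrite=False, raising the "Checkpoint already exists" error.
try:
    DCP.save(
        {"random": torch.randn(10)},
        storage_writer=DCP.FileSystemWriter(ckpt_dir, overwrite=False),
    )
except CheckpointException:
    pass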
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
backend
|
def backend(self):
return "cpu:gloo,cuda:nccl"
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
test_no_cpu
|
def test_no_cpu(self):
with self.assertRaisesRegex(
AssertionError, r"A CPU backend must be enabled for async save;.*?"
):
f = saver.async_save({})
f.result()
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestNoCPU(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
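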
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
test_init_state_dict
|
def test_init_state_dict(self):
temp_dir = self.temp_dir
model = TestDummyModel()
optim = torch.optim.Adam(model.parameters(), lr=0.1)
state_dict_to_save = {
"model": get_model_state_dict(model),
"optimizer": get_optimizer_state_dict(model, optim),
}
DCP.save(state_dict_to_save, checkpoint_id=temp_dir)
torch.manual_seed(0)
model_2 = TestDummyModel()
# Changing the learning rate for optimizer, which is not a tensor.
optim_2 = torch.optim.Adam(model_2.parameters(), lr=0.2)
msd = get_model_state_dict(model_2)
osd = get_optimizer_state_dict(model_2, optim_2)
state_dict_to_load = {"model": msd, "optimizer": osd}
DCP.load(state_dict_to_load, checkpoint_id=temp_dir)
# We need to check that the two variables point to the same object in memory,
# since we claim DCP is in-place loading.
self.assertTrue(msd is state_dict_to_load["model"])
self.assertTrue(osd is state_dict_to_load["optimizer"])
# set_state_dict calls load_state_dict for the model and optimizer,
# so the learning rate in optim_2.param_groups should now be 0.1 instead of 0.2.
set_state_dict(
model_2,
optim_2,
model_state_dict=state_dict_to_load["model"],
optim_state_dict=state_dict_to_load["optimizer"],
)
self.assertEqual(msd, get_model_state_dict(model_2))
self.assertEqual(osd, get_optimizer_state_dict(model_2, optim_2))
self.assertEqual(optim_2.param_groups[0]["lr"], 0.1)
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestInitStateDict(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
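test_init_state_dict above relies on DCP loading state dicts in place and on set_state_dict pushing the loaded values, including non-tensor entries such as the learning rate, back into the live model and optimizer. A minimal sketch of that flow on a plain nn.Linear, assuming no parallel wrapper and no process group (the checkpoint round trip is elided and simulated by editing the dict directly):

import torch
from torch.distributed.checkpoint.state_dict import (
    get_model_state_dict,
    get_optimizer_state_dict,
    set_state_dict,
)

model = torch.nn.Linear(4, 4)
optim = torch.optim.Adam(model.parameters(), lr=0.2)

# These dicts are the in-place targets that DCP.load would fill.
msd = get_model_state_dict(model)
osd = get_optimizer_state_dict(model, optim)

# Pretend a checkpoint written with lr=0.1 was loaded into osd in place.
osd["param_groups"][0]["lr"] = 0.1

# set_state_dict applies both dicts back onto the live objects.
set_state_dict(model, optim, model_state_dict=msd, optim_state_dict=osd)
assert optim.param_groups[0]["lr"] == 0.1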
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
__eq__
|
def __eq__(self, other):
return torch.equal(self.data, other.data)
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestStatefulObj:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
_train
|
def _train(model, optim, train_steps=1):
torch.manual_seed(0)
loss = None
train_state = TestTrainState()
for _ in range(train_steps):
loss = model(model.get_input()).sum()
loss.backward()
# We usually sync the loss across dp ranks in real training.
# Here we just simulate that for testing purposes.
train_state.step += 1
train_state.current_loss = torch.rand(1).item()
train_state.losses.append(train_state.current_loss)
optim.step()
optim.zero_grad()
return loss, train_state
class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
@property
def backend(self):
return "cpu:gloo,cuda:nccl"
def _create_model(self, compile, model_type, state_dict_options=None):
dummy_model = TestDummyModel().cuda()
assert model_type in ModelType, f"{model_type} is not supported."
if model_type == ModelType.FSDP:
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
model = FSDP(
dummy_model,
device_mesh=device_mesh,
use_orig_params=True,
)
elif model_type == ModelType.HSDP:
device_mesh = init_device_mesh(self.device_type, (2, self.world_size // 2))
model = FSDP(
dummy_model,
device_mesh=device_mesh,
use_orig_params=True,
sharding_strategy=ShardingStrategy.HYBRID_SHARD,
)
elif model_type == ModelType.FSDP_TP:
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(dummy_model, tp_mesh, parallelize_plan)
model = FSDP(model, device_mesh=dp_mesh, use_orig_params=True)
elif model_type == ModelType.DDP:
model = DistributedDataParallel(dummy_model)
model.get_input = partial(TestDummyModel.get_input, model)
else:
model = dummy_model
if compile:
# TODO: enable dynamic=True when dynamic shape support is enabled.
# model = torch.compile(model)
model = torch.compile(model, dynamic=False)
optim = self._optim(model)
if model_type is not ModelType.NONE:
_patch_model_state_dict(model, options=state_dict_options)
_patch_optimizer_state_dict(
model, optimizers=optim, options=state_dict_options
)
return model, optim
def _optim(self, model):
return torch.optim.Adam(model.parameters(), lr=0.1)
@with_comms
@skip_if_lt_x_gpu(4)
@with_temp_dir
@parametrize("compile", [True, False])
# TODO: Previously PairwiseParallel does not shard properly, passing ModelType.FSDP_TP test where it
# should have failed. Disabling the failed test temporarily to unblock the deprecation of PairwiseParallel.
@parametrize("model_type", [ModelType.FSDP, ModelType.HSDP, ModelType.DDP])
def test_e2e(self, compile, model_type):
self._run_e2e_test(compile, model_type)
@with_comms
@skip_if_lt_x_gpu(4)
@with_temp_dir
@parametrize("cache_staged_state_dict", [False, True])
def test_e2e_async_cached(self, cache_staged_state_dict):
self._run_e2e_test(
compile=False,
model_type=ModelType.FSDP,
async_op=True,
cache_staged_state_dict=cache_staged_state_dict,
)
def _run_e2e_test(
self, compile, model_type, async_op=False, cache_staged_state_dict=False
):
model, optim = self._create_model(compile, ModelType.NONE)
_train(model, optim, train_steps=2)
dist_model, dist_optim = self._create_model(compile, model_type)
_, original_train_state = _train(dist_model, dist_optim, train_steps=2)
original_stateful_obj = TestStatefulObj() # tests arbitrary saving/loading
sd = {
"model": dist_model,
"optimizer": dist_optim,
"s": original_stateful_obj,
"train_state": original_train_state,
}
if async_op:
writer = DCP.FileSystemWriter(
self.temp_dir, cache_staged_state_dict=cache_staged_state_dict
)
f = saver.async_save(sd, storage_writer=writer)
t = time.monotonic()
while not f.done():
time.sleep(1)
print(f"still waiting... {time.monotonic() - t}")
f.result()
else:
DCP.save(sd, checkpoint_id=self.temp_dir)
loaded_stateful_obj = TestStatefulObj()
loaded_train_state = TestTrainState()
dist_model, dist_optim = self._create_model(compile, model_type)
DCP.load(
state_dict={
"model": dist_model,
"optimizer": dist_optim,
"s": loaded_stateful_obj,
"train_state": loaded_train_state,
},
checkpoint_id=self.temp_dir,
)
self.assertEqual(original_stateful_obj, loaded_stateful_obj)
self.assertEqual(original_train_state, loaded_train_state)
# train one more step on both models
loss, _ = _train(model, optim, train_steps=1)
dist_loss, _ = _train(dist_model, dist_optim, train_steps=1)
self.assertEqual(loss, dist_loss)
dist_msd, dist_osd = get_state_dict(dist_model, optimizers=dist_optim)
model_sd, optim_sd = get_state_dict(model, optimizers=optim)
self._verify_msd(model_sd, dist_msd)
self._verify_osd_by_load(model, optim, self._optim(model), dist_osd)
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(4)
def test_different_ordered_state_dict_keys(self):
"""Tests that the order of keys in the state dict does not matter when loading
If order was not accounted for, the following test would cause a deadlock.
"""
world_size = self.world_size
class Foo:
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
tl = [
torch.ones(2, dtype=torch.int64, device="cuda")
for _ in range(world_size)
]
t = (
torch.arange(2, dtype=torch.int64, device="cuda")
+ 1
+ 2 * dist.get_rank()
)
dist.all_gather(tl, t, async_op=False)
class Bar:
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
tensor = (
torch.arange(2, dtype=torch.int64, device="cuda")
+ 1
+ 2 * dist.get_rank()
)
dist.all_reduce(tensor, op=ReduceOp.SUM)
if self.rank == 0:
sd = {
"A": Foo(),
"B": Bar(),
}
else:
sd = {
"B": Bar(),
"A": Foo(),
}
DCP.save(sd, checkpoint_id=self.temp_dir)
DCP.load(sd, checkpoint_id=self.temp_dir)
@with_temp_dir
def test_no_dist(self):
# since comm's are not initialized in this method, `no_dist`
# is assumed False
DCP.save({}, checkpoint_id=self.temp_dir)
DCP.load({}, checkpoint_id=self.temp_dir)
@with_comms
@skip_if_lt_x_gpu(4)
@with_temp_dir
def test_partial_load(self):
model, optim = self._create_model(compile=False, model_type=ModelType.NONE)
_train(model, optim, train_steps=2)
dist_model, dist_optim = self._create_model(
compile=False, model_type=ModelType.FSDP
)
_train(dist_model, dist_optim, train_steps=2)
DCP.save(
{"model": dist_model, "optimizer": dist_optim}, checkpoint_id=self.temp_dir
)
dist_model, _ = self._create_model(compile=False, model_type=ModelType.FSDP)
DCP.load({"model": dist_model}, checkpoint_id=self.temp_dir)
dist_msd = get_model_state_dict(dist_model)
model_sd = get_model_state_dict(model)
self._verify_msd(model_sd, dist_msd)
# another way
loaded_model_sd = _load_state_dict_from_keys(
"model", checkpoint_id=self.temp_dir
)["model"]
self._verify_msd(model_sd, loaded_model_sd, offload_to_cpu=True)
loaded_optim_state = _load_state_dict_from_keys(
"optimizer.state", checkpoint_id=self.temp_dir
)["optimizer"]["state"]
self.assertNotIn("param_groups", loaded_optim_state)
for k, v in dist_optim.state_dict()["state"].items():
for optim_key in ["exp_avg", "exp_avg_sq", "step"]:
self._compare_tensor(
loaded_optim_state[k][optim_key], v[optim_key], offload_to_cpu=True
)
@with_comms
@skip_if_lt_x_gpu(4)
@with_temp_dir
def test_overwrite(self):
t1, t2 = torch.randn(10), torch.randn(10)
DCP.save({"random": t1}, checkpoint_id=self.temp_dir)
DCP.save(
{"random": t2},
storage_writer=DCP.FileSystemWriter(self.temp_dir, overwrite=True),
)
sd = {"random": torch.zeros(10)}
DCP.load(sd, checkpoint_id=self.temp_dir)
self.assertTrue(torch.allclose(sd["random"], t2))
with self.assertRaisesRegex(
CheckpointException, ".*Checkpoint already exists.*"
):
DCP.save(
{"random": t2},
storage_writer=DCP.FileSystemWriter(self.temp_dir, overwrite=False),
)
class TestNoCPU(DTensorTestBase):
@property
def backend(self):
return "nccl"
@with_comms
def test_no_cpu(self):
with self.assertRaisesRegex(
AssertionError, r"A CPU backend must be enabled for async save;.*?"
):
f = saver.async_save({})
f.result()
class TestInitStateDict(DTensorTestBase):
@with_temp_dir
def test_init_state_dict(self):
temp_dir = self.temp_dir
model = TestDummyModel()
optim = torch.optim.Adam(model.parameters(), lr=0.1)
state_dict_to_save = {
"model": get_model_state_dict(model),
"optimizer": get_optimizer_state_dict(model, optim),
}
DCP.save(state_dict_to_save, checkpoint_id=temp_dir)
torch.manual_seed(0)
model_2 = TestDummyModel()
# Changing the learning rate for optimizer, which is not a tensor.
optim_2 = torch.optim.Adam(model_2.parameters(), lr=0.2)
msd = get_model_state_dict(model_2)
osd = get_optimizer_state_dict(model_2, optim_2)
state_dict_to_load = {"model": msd, "optimizer": osd}
DCP.load(state_dict_to_load, checkpoint_id=temp_dir)
# We need to check that the two variables point to the same object in memory,
# since we claim DCP is in-place loading.
self.assertTrue(msd is state_dict_to_load["model"])
self.assertTrue(osd is state_dict_to_load["optimizer"])
# set_state_dict calls load_state_dict for the model and optimizer,
# so the learning rate in optim_2.param_groups should now be 0.1 instead of 0.2.
set_state_dict(
model_2,
optim_2,
model_state_dict=state_dict_to_load["model"],
optim_state_dict=state_dict_to_load["optimizer"],
)
self.assertEqual(msd, get_model_state_dict(model_2))
self.assertEqual(osd, get_optimizer_state_dict(model_2, optim_2))
self.assertEqual(optim_2.param_groups[0]["lr"], 0.1)
instantiate_parametrized_tests(TestE2ESaveAndLoad)
if __name__ == "__main__":
run_tests()
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
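test_partial_load in the record above reads back only a subtree of a checkpoint via the private, experimental _load_state_dict_from_keys helper. A hypothetical single-process sketch of that pattern, assuming the helper, like DCP.load, falls back to non-distributed mode when no process group is initialized (directory, keys, and tensors are illustrative):

import tempfile

import torch
import torch.distributed.checkpoint as DCP
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys

ckpt_dir = tempfile.mkdtemp()
DCP.save(
    {"model": {"weight": torch.randn(4)}, "optimizer": {"step": torch.tensor(1)}},
    checkpoint_id=ckpt_dir,
)

# Read back only the "model" subtree; the optimizer entries are never loaded.
model_only = _load_state_dict_from_keys("model", checkpoint_id=ckpt_dir)["model"]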
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
_create_model
|
def _create_model(self, compile, model_type, state_dict_options=None):
dummy_model = TestDummyModel().cuda()
assert model_type in ModelType, f"{model_type} is not supported."
if model_type == ModelType.FSDP:
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
model = FSDP(
dummy_model,
device_mesh=device_mesh,
use_orig_params=True,
)
elif model_type == ModelType.HSDP:
device_mesh = init_device_mesh(self.device_type, (2, self.world_size // 2))
model = FSDP(
dummy_model,
device_mesh=device_mesh,
use_orig_params=True,
sharding_strategy=ShardingStrategy.HYBRID_SHARD,
)
elif model_type == ModelType.FSDP_TP:
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model = parallelize_module(dummy_model, tp_mesh, parallelize_plan)
model = FSDP(model, device_mesh=dp_mesh, use_orig_params=True)
elif model_type == ModelType.DDP:
model = DistributedDataParallel(dummy_model)
model.get_input = partial(TestDummyModel.get_input, model)
else:
model = dummy_model
if compile:
# TODO: enable dynamic=True when dynamic shape support is enabled.
# model = torch.compile(model)
model = torch.compile(model, dynamic=False)
optim = self._optim(model)
if model_type is not ModelType.NONE:
_patch_model_state_dict(model, options=state_dict_options)
_patch_optimizer_state_dict(
model, optimizers=optim, options=state_dict_options
)
return model, optim
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
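For the ModelType.FSDP_TP branch, _create_model above builds a 2-D ("dp", "tp") device mesh and applies a colwise/rowwise tensor-parallel plan before wrapping in FSDP. A hypothetical sketch of just the mesh and TP plan, shrunk to a 1x1 CPU mesh so it can run in a single gloo process (the TwoLayer module, port, and sizes are illustrative; the FSDP wrapping step is omitted):

import os

import torch
import torch.distributed as dist
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    parallelize_module,
    RowwiseParallel,
)


class TwoLayer(torch.nn.Module):
    # Illustrative stand-in for TestDummyModel's net1/net2 pair.
    def __init__(self) -> None:
        super().__init__()
        self.net1 = torch.nn.Linear(8, 16)
        self.net2 = torch.nn.Linear(16, 8)

    def forward(self, x):
        return self.net2(torch.relu(self.net1(x)))


os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29501")
dist.init_process_group("gloo", rank=0, world_size=1)

# A 1x1 CPU mesh stands in for the (2, world_size // 2) mesh used above.
mesh_2d = init_device_mesh("cpu", (1, 1), mesh_dim_names=("dp", "tp"))
tp_mesh = mesh_2d["tp"]

model = parallelize_module(
    TwoLayer(), tp_mesh, {"net1": ColwiseParallel(), "net2": RowwiseParallel()}
)
out = model(torch.randn(2, 8))

dist.destroy_process_group()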
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
test_e2e_async_cached
|
def test_e2e_async_cached(self, cache_staged_state_dict):
self._run_e2e_test(
compile=False,
model_type=ModelType.FSDP,
async_op=True,
cache_staged_state_dict=cache_staged_state_dict,
)
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
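test_e2e_async_cached above drives the asynchronous save path: async_save returns a Future while staging and writing happen in the background. A minimal, hypothetical single-rank sketch of that flow (port, directory, and tensor are illustrative; a gloo group is created because async_save asserts that a CPU-capable backend is available, as TestNoCPU checks):

import os
import tempfile

import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29502")
dist.init_process_group("gloo", rank=0, world_size=1)

ckpt_dir = tempfile.mkdtemp()
writer = DCP.FileSystemWriter(ckpt_dir, cache_staged_state_dict=True)

future = saver.async_save({"x": torch.randn(8)}, storage_writer=writer)
# Training could continue here; the write completes in the background.
future.result()

dist.destroy_process_group()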
torch
|
test/distributed/checkpoint/test_2d_fsdp_dt_checkpoint.py
|
__init__
|
def __init__(self):
super().__init__()
self.net1 = torch.nn.Linear(5, 8)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(8, 4)
self.net3 = torch.nn.Linear(4, 12)
|
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed.checkpoint.default_planner import (
DefaultSavePlanner,
DefaultLoadPlanner,
)
from torch.distributed.checkpoint.optimizer import (
load_sharded_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class SimpleModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/checkpoint/test_2d_fsdp_dt_checkpoint.py
|
forward
|
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
|
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed.checkpoint.default_planner import (
DefaultSavePlanner,
DefaultLoadPlanner,
)
from torch.distributed.checkpoint.optimizer import (
load_sharded_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class SimpleModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/checkpoint/test_2d_fsdp_dt_checkpoint.py
|
create_new_dist_group
|
def create_new_dist_group():
world_size = dist.get_world_size()
group1 = [i for i in range(world_size) if i % 2 == 0]
group2 = [i for i in range(world_size) if i % 2 != 0]
# create new fsdp group for resharding
fsdp_0 = dist.new_group(ranks=group1)
fsdp_1 = dist.new_group(ranks=group2)
if dist.get_rank() % 2 == 0:
my_fsdp = fsdp_0
else:
my_fsdp = fsdp_1
return my_fsdp
|
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed.checkpoint.default_planner import (
DefaultSavePlanner,
DefaultLoadPlanner,
)
from torch.distributed.checkpoint.optimizer import (
load_sharded_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/checkpoint/test_2d_fsdp_dt_checkpoint.py
|
opt_at
|
def opt_at(opt, idx):
return list(opt.state.values())[idx]
# Adam lazily creates its state
self.assertEqual(
opt_at(optim, 0)["exp_avg"], opt_at(optim_2, 0)["exp_avg"]
)
self.assertEqual(
opt_at(optim, 0)["exp_avg_sq"], opt_at(optim_2, 0)["exp_avg_sq"]
)
|
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed.checkpoint.default_planner import (
DefaultSavePlanner,
DefaultLoadPlanner,
)
from torch.distributed.checkpoint.optimizer import (
load_sharded_optimizer_state_dict,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/_tools/test_runtime_estimator.py
|
_calculate_fc1_size
|
def _calculate_fc1_size(self):
size = self.image_size
size = (size - 5 + 1) // 2 # conv1 and pool
size = (size - 5 + 1) // 2 # conv2 and pool
size = size - 3 + 1 # conv3
size = (size - 3 + 1) // 2 # conv4 and pool
return 512 * size * size
|
import unittest
from dataclasses import dataclass
from typing import Any, Callable, cast, Tuple, Union
import torch
from torch import nn, optim
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools.runtime_estimator import RuntimeEstimator
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
)
class SimpleCNN(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/_tools/test_runtime_estimator.py
|
forward
|
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = nn.functional.relu(self.conv3(x))
x = self.pool(nn.functional.relu(self.conv4(x)))
x = x.view(-1, self.fc1_size)
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = self.fc3(x)
return x
|
import unittest
from dataclasses import dataclass
from typing import Any, Callable, cast, Tuple, Union
import torch
from torch import nn, optim
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools.runtime_estimator import RuntimeEstimator
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import (
ModelArgs,
Transformer,
)
class SimpleCNN(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
|
gpus_for_rank
|
def gpus_for_rank(world_size):
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
class Task(nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
def forward(self, x):
return self.p * x
class TestDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x ** (1 + rank))
class DistributedDataParallelCommHookTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
try:
os.remove(self.file_name)
except OSError:
pass
def _get_process_group_nccl(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
return dist.distributed_c10d._get_default_group()
@property
def world_size(self):
return 2
def _local_model(self):
local_model = TestDdpCommHook().cpu()
return local_model
def _get_grads(self, process_group, hook_type=None):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
TestDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
# Register DDP Communication Hook if defined
if hook_type is not None:
register_ddp_comm_hook(
comm_hook_type=hook_type, model=gpu_model, state=process_group
)
return self._run_and_get_grads(gpu_model)
def _run_and_get_grads(self, model):
torch.manual_seed(2020)
input = torch.randn(40, 20)
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
# The only layer
param = next(model.parameters())
return param.grad
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook(self):
"""
This unit test verifies the ``allreduce`` hook registered case gives same result
with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.ALLREDUCE)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_fp16compress_hook(self):
"""
This unit test verifies the ``fp16 compress`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.FP16_COMPRESS)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_tensor_hook(self):
"""
This unit test verifies the ``quantize per tensor`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_TENSOR)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_channel_hook(self):
"""
This unit test verifies the ``quantize per channel`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(
process_group, DDPCommHookType.QUANTIZE_PER_CHANNEL
)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_noop_hook(self):
"""
This unit test verifies the ``noop`` hook registered case and a subsequent allreduce
gives same result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.NOOP)
# Apply a subsequent allreduce to average grads.
hook_grads.div_(self.world_size)
dist.all_reduce(hook_grads, group=process_group)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_is_last_hook(self):
process_group = self._get_process_group_nccl()
def hook(flags, bucket):
flags.append(bucket.is_last())
fut = torch.futures.Future()
fut.set_result(bucket.buffer())
return fut
flags = []
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = nn.Sequential(
nn.Linear(2, 4000, bias=False),
*[nn.Linear(4000, 4000, bias=False) for _ in range(10)]
)
gpu_model = DistributedDataParallel(
model.to(device_id),
device_ids=[device_id],
process_group=process_group,
)
gpu_model.register_comm_hook(state=flags, hook=hook)
input = torch.randn(10, 2)
gpu_model(input).sum().backward()
self.assertTrue(flags[-1])
self.assertFalse(any(flags[:-1]))
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
def gpus_for_rank(world_size):
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
class Task(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
def forward(self, x):
return self.p * x
class TestDdpCommHook(nn.Module):
def __init__(self) -> None:
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x ** (1 + rank))
class DistributedDataParallelCommHookTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
try:
os.remove(self.file_name)
except OSError:
pass
def _get_process_group_nccl(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
)
return dist.distributed_c10d._get_default_group()
@property
def world_size(self):
return 2
def _local_model(self):
local_model = TestDdpCommHook().cpu()
return local_model
def _get_grads(self, process_group, hook_type=None):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
TestDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
# Register DDP Communication Hook if defined
if hook_type is not None:
register_ddp_comm_hook(
comm_hook_type=hook_type, model=gpu_model, state=process_group
)
return self._run_and_get_grads(gpu_model)
def _run_and_get_grads(self, model):
torch.manual_seed(2020)
input = torch.randn(40, 20)
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
# The only layer
param = next(model.parameters())
return param.grad
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_allreduce_hook(self):
"""
This unit test verifies the ``allreduce`` hook registered case gives same result
with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.ALLREDUCE)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_fp16compress_hook(self):
"""
This unit test verifies the ``fp16 compress`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.FP16_COMPRESS)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_tensor_hook(self):
"""
This unit test verifies the ``quantize per tensor`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_TENSOR)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_quantize_per_channel_hook(self):
"""
This unit test verifies the ``quantize per channel`` hook registered case
gives close result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(
process_group, DDPCommHookType.QUANTIZE_PER_CHANNEL
)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_ddp_comm_hook_noop_hook(self):
"""
This unit test verifies the ``noop`` hook registered case and a subsequent allreduce
gives same result with no hook registered case.
"""
process_group = self._get_process_group_nccl()
# No hook registered case, get the reference grads.
reference_grads = self._get_grads(process_group, None)
# Register hook case, get the hook grads.
hook_grads = self._get_grads(process_group, DDPCommHookType.NOOP)
# Apply a subsequent allreduce to average grads.
hook_grads.div_(self.world_size)
dist.all_reduce(hook_grads, group=process_group)
torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_is_last_hook(self):
process_group = self._get_process_group_nccl()
def hook(flags, bucket):
flags.append(bucket.is_last())
fut = torch.futures.Future()
fut.set_result(bucket.buffer())
return fut
flags = []
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = nn.Sequential(
nn.Linear(2, 4000, bias=False),
*[nn.Linear(4000, 4000, bias=False) for _ in range(10)],
)
gpu_model = DistributedDataParallel(
model.to(device_id),
device_ids=[device_id],
process_group=process_group,
)
gpu_model.register_comm_hook(state=flags, hook=hook)
input = torch.randn(10, 2)
gpu_model(input).sum().backward()
self.assertTrue(flags[-1])
self.assertFalse(any(flags[:-1]))
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
import os
import sys
import torch
from torch import nn
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
|
import os
import sys
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
|
__init__
|
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
|
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
|
import os
import sys
import torch
from torch import nn
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class Task(nn.Module):
|
import os
import sys
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class Task(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
|
__init__
|
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
|
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
|
import os
import sys
import torch
from torch import nn
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
class Task(nn.Module):
|
import os
import sys
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import (
DDPCommHookType,
register_ddp_comm_hook,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
class Task(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/algorithms/quantization/test_quantization.py
|
_build_tensor
|
def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
if value is None:
value = size
if device_id is None:
return torch.empty(size, dtype=dtype).fill_(value)
else:
return torch.empty(size, dtype=dtype).fill_(value).cuda(device_id)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print("Spawn not available, skipping tests.", file=sys.stderr)
sys.exit(0)
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class DistQuantizationTests(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return int(os.environ["WORLD_SIZE"])
@requires_gloo()
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports all_gather_fp16")
def test_all_gather_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.FP16)
@requires_gloo()
@sandcastle_skip_if(BACKEND != "gloo", "Only gloo backend supports all_gather_fp16")
def test_all_gather_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='gloo')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.BFP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_all_to_all_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_all_to_all_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_single_fp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16
)
@requires_nccl()
@sandcastle_skip_if(BACKEND != "nccl", "Only nccl backend supports all_to_all_single_bfp16")
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(store=store, rank=self.rank, world_size=self.world_size, backend='nccl')
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16
)
def _test_all_gather(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float, qtype=None):
for dest in group:
tensor = _build_tensor([dest + 1, dest + 1], rank, dtype=dtype)
tensors = [_build_tensor([dest + 1, dest + 1], -1, dtype=dtype) for i in group]
expected_tensors = [
_build_tensor([dest + 1, dest + 1], i, dtype=dtype) for i in group
]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if tensors[0].dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensors[0]).shape]
else:
tensor_shapes = [tensors[0].shape]
allgather = quant.auto_quantize(dist.all_gather, qtype, quant_loss=None)
allgather(tensors, tensor, group=group_id, async_op=False)
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
qtype=None
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size], dtype=dtype) * rank
for i, _ in enumerate(group)
]
out_tensors = [
torch.ones([(rank + 1), size], dtype=dtype) for _ in group
]
expected_tensors = [
torch.ones([rank + 1, size], dtype=dtype) * i for i in group
]
if cuda:
in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
expected_tensors = [
t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
]
out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
quantize_alltoall = quant.auto_quantize(dist.all_to_all, qtype, quant_loss=None)
quantize_alltoall(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all_single(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float, qtype=DQuantType.FP16
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank
out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
expected_tensor = torch.cat(
[torch.ones([rank + 1, size], dtype=dtype) * i for i in group]
)
if cuda:
rank_to_GPU = rank_to_GPU[rank][0]
in_tensor = in_tensor.cuda(rank_to_GPU)
expected_tensor = expected_tensor.cuda(rank_to_GPU)
out_tensor = out_tensor.cuda(rank_to_GPU)
quantize_alltoall_single = quant.auto_quantize(dist.all_to_all_single, qtype, quant_loss=None)
quantize_alltoall_single(out_tensor, in_tensor, out_splits=out_splits, in_splits=in_splits, group=group_id)
self.assertEqual(out_tensor, expected_tensor)
if __name__ == "__main__":
run_tests()
|
def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
if value is None:
value = size
if device_id is None:
return torch.empty(size, dtype=dtype).fill_(value)
else:
return torch.empty(size, dtype=dtype).fill_(value).cuda(device_id)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
if NO_MULTIPROCESSING_SPAWN:
print("Spawn not available, skipping tests.", file=sys.stderr)
sys.exit(0)
BACKEND = os.environ["BACKEND"]
if BACKEND == "gloo" or BACKEND == "nccl":
class DistQuantizationTests(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return int(os.environ["WORLD_SIZE"])
@requires_gloo()
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only gloo backend supports all_gather_fp16"
)
def test_all_gather_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(
group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.FP16
)
@requires_gloo()
@skip_but_pass_in_sandcastle_if(
BACKEND != "gloo", "Only gloo backend supports all_gather_fp16"
)
def test_all_gather_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="gloo"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.group.WORLD
self._test_all_gather(
group, group_id, self.rank, dtype=torch.float32, qtype=DQuantType.BFP16
)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm_multiprocess
def test_all_to_all_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16,
)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only nccl backend supports all_to_all_fp16"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm_multiprocess
def test_all_to_all_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16,
)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only nccl backend supports all_to_all_single_fp16"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_fp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.FP16,
)
@requires_nccl()
@skip_but_pass_in_sandcastle_if(
BACKEND != "nccl", "Only nccl backend supports all_to_all_single_bfp16"
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_all_to_all_single_bfp16(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
store=store, rank=self.rank, world_size=self.world_size, backend="nccl"
)
device = torch.device(f"cuda:{self.rank}")
group = list(range(0, self.world_size))
group_id = dist.new_group(range(self.world_size))
rank_to_GPU = init_multigpu_helper(self.world_size, BACKEND)
self._test_all_to_all_single(
group,
group_id,
self.rank,
cuda=True,
rank_to_GPU=rank_to_GPU,
dtype=torch.float32,
qtype=DQuantType.BFP16,
)
def _test_all_gather(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
qtype=None,
):
for dest in group:
tensor = _build_tensor([dest + 1, dest + 1], rank, dtype=dtype)
tensors = [
_build_tensor([dest + 1, dest + 1], -1, dtype=dtype) for i in group
]
expected_tensors = [
_build_tensor([dest + 1, dest + 1], i, dtype=dtype) for i in group
]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
if tensors[0].dtype == torch.complex64:
tensor_shapes = [torch.view_as_real(tensors[0]).shape]
else:
tensor_shapes = [tensors[0].shape]
allgather = quant.auto_quantize(dist.all_gather, qtype, quant_loss=None)
allgather(tensors, tensor, group=group_id, async_op=False)
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
qtype=None,
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size], dtype=dtype) * rank
for i, _ in enumerate(group)
]
out_tensors = [
torch.ones([(rank + 1), size], dtype=dtype) for _ in group
]
expected_tensors = [
torch.ones([rank + 1, size], dtype=dtype) * i for i in group
]
if cuda:
in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]
expected_tensors = [
t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors
]
out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]
quantize_alltoall = quant.auto_quantize(
dist.all_to_all, qtype, quant_loss=None
)
quantize_alltoall(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors):
self.assertEqual(t1, t2)
def _test_all_to_all_single(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
qtype=DQuantType.FP16,
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank
out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)
expected_tensor = torch.cat(
[torch.ones([rank + 1, size], dtype=dtype) * i for i in group]
)
if cuda:
rank_to_GPU = rank_to_GPU[rank][0]
in_tensor = in_tensor.cuda(rank_to_GPU)
expected_tensor = expected_tensor.cuda(rank_to_GPU)
out_tensor = out_tensor.cuda(rank_to_GPU)
quantize_alltoall_single = quant.auto_quantize(
dist.all_to_all_single, qtype, quant_loss=None
)
quantize_alltoall_single(
out_tensor,
in_tensor,
out_splits=out_splits,
in_splits=in_splits,
group=group_id,
)
self.assertEqual(out_tensor, expected_tensor)
if __name__ == "__main__":
run_tests()
|
import torch
import os
import torch.cuda
import sys
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
init_multigpu_helper,
requires_gloo,
skip_if_rocm,
skip_if_lt_x_gpu,
requires_nccl,
)
from torch.testing._internal.common_utils import sandcastle_skip_if, run_tests, TEST_WITH_DEV_DBG_ASAN, NO_MULTIPROCESSING_SPAWN
torch.backends.cuda.matmul.allow_tf32 = False
|
import os
import sys
import torch
import torch.cuda
import torch.distributed as dist
import torch.distributed.algorithms._quantization.quantization as quant
from torch.distributed.algorithms._quantization.quantization import DQuantType
from torch.testing._internal.common_distributed import (
init_multigpu_helper,
MultiProcessTestCase,
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
skip_if_rocm_multiprocess,
)
from torch.testing._internal.common_utils import (
NO_MULTIPROCESSING_SPAWN,
run_tests,
skip_but_pass_in_sandcastle_if,
TEST_WITH_DEV_DBG_ASAN,
)
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/checkpoint/e2e/test_fine_tuning.py
|
forward
|
def forward(self, batch):
x = self.relu(self.layer1(batch))
x = self.relu(self.layer2(x))
x = self.relu(self.layer3(x))
x = self.sequential(x)
x = self.module_list[1](self.module_list[0](x))
return x
|
import os
import sys
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.nn as nn
from torch.distributed._tensor import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_state_dict,
set_model_state_dict,
set_state_dict,
StateDictOptions,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
DIM = 500
class PreTrainedModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fine_tuning.py
|
forward
|
def forward(self, batch):
x = self.relu(self.layer1(batch))
x = self.relu(self.layer2(x))
x = self.relu(self.layer3(x))
x = self.sequential(x)
x = self.module_list[1](self.module_list[0](x))
return x
|
import os
import sys
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.nn as nn
from torch.distributed._tensor import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_state_dict,
set_model_state_dict,
set_state_dict,
StateDictOptions,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
DIM = 500
class PreTrainedModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fine_tuning.py
|
backend
|
def backend(self):
return "cpu:gloo,cuda:nccl"
|
import os
import sys
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dist_cp
import torch.nn as nn
from torch.distributed._tensor import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
get_model_state_dict,
get_state_dict,
set_model_state_dict,
set_state_dict,
StateDictOptions,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
DIM = 500
class TestFineTuning(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
forward
|
def forward(self, x):
raise NotImplementedError
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
__init__
|
def __init__(self) -> None:
super().__init__()
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
forward
|
def forward(self, x):
raise NotImplementedError
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
__init__
|
def __init__(self) -> None:
super().__init__()
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
forward
|
def forward(self, x):
raise NotImplementedError
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
__init__
|
def __init__(self) -> None:
super().__init__()
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
forward
|
def forward(self, x):
raise NotImplementedError
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
forward
|
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
x = F.relu(self.net4(x))
return x
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestDummyModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
get_input
|
def get_input(self):
return torch.rand(8, 8, device="cuda")
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestDummyModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
state_dict
|
def state_dict(self):
return {"data": self.data}
|
import time
from dataclasses import dataclass, field
from enum import auto, Enum
from functools import partial
from io import BytesIO
from typing import Any, Dict, List
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as DCP
import torch.distributed.checkpoint.state_dict_saver as saver
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed._tensor.device_mesh import init_device_mesh
from torch.distributed.checkpoint.state_dict import (
_patch_model_state_dict,
_patch_optimizer_state_dict,
get_model_state_dict,
get_optimizer_state_dict,
get_state_dict,
set_state_dict,
)
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys
from torch.distributed.checkpoint.utils import CheckpointException
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestStatefulObj:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_fsdp_ep.py
|
test_e2e
|
def test_e2e(self):
model = TopModel(self.rank).cuda()
mesh_fsdp_tp = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=("dp", "tp")
)
        # TODO: we are using an internal API atm. Change to a public API once it is ready.
mesh_fsdp_ep = _mesh_resources.create_child_mesh(mesh_fsdp_tp, ("dp",))
del _mesh_resources.child_to_parent_mapping[mesh_fsdp_ep]
mesh_fsdp = init_device_mesh(self.device_type, (8,))
for i, l in enumerate(model.second.ep_layers):
model.second.ep_layers[i] = FSDP(
l, use_orig_params=True, device_mesh=mesh_fsdp_ep
)
model.second = FSDP(model.second, use_orig_params=True, device_mesh=mesh_fsdp)
model = FSDP(model, use_orig_params=True, device_mesh=mesh_fsdp)
optim = torch.optim.Adam(model.parameters(), lr=0.1)
msd, osd = get_state_dict(model, optim)
# FSDP only params
for key in (
"net.0.weight",
"net.0.bias",
"second.net.0.weight",
"second.net.0.bias",
):
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), tuple(range(8)))
# FSDP/EP params
layer = self.rank % 4
ranks = (layer, layer + 4)
for i in range(4):
for key in (
f"second.ep_layers.{i}.net1.0.weight",
f"second.ep_layers.{i}.net1.0.bias",
f"second.ep_layers.{i}.net2.0.weight",
f"second.ep_layers.{i}.net2.0.bias",
):
if layer != i:
self.assertTrue(key not in msd)
else:
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), ranks)
self.assertEqual(set(osd["state"].keys()), set(msd.keys()))
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class TestFSDPWithEP(DTensorTestBase, VerifyStateDictMixin):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/checkpoint/e2e/test_pipeline.py
|
forward
|
def forward(self, batch):
x = self.relu(self.layer1(batch))
x = self.relu(self.layer2(x))
x = self.relu(self.layer3(x))
x = self.relu(self.layer4(x))
return x
|
import os
import sys
import torch
import torch.distributed as dist
import torch.distributed.checkpoint as dcp
import torch.nn as nn
from torch.distributed.checkpoint.state_dict import get_state_dict, set_state_dict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
DIM = 500
class PipelineModel(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|