column          dtype           values / lengths
library         stringclasses   1 value
test_file       stringclasses   785 values
test_function   stringlengths   1–295
before          stringlengths   0–448k
after           stringlengths   0–487k
context_before  stringclasses   947 values
context_after   stringlengths   0–16.3k
commit_before   stringclasses   1 value
commit_after    stringclasses   1 value
change_type     stringclasses   3 values
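Each row below is one record with these fields: the source of a test function before and after a commit pair, its surrounding import context, and the change type. As a minimal sketch of how such records might be consumed — the dataset identifier and the use of the Hugging Face `datasets` loader are assumptions for illustration, not part of this dump:

```python
# Minimal sketch, assuming the rows are published as a Hugging Face dataset.
# "example/fsdp-test-changes" is a hypothetical identifier, not the real dataset ID.
from datasets import load_dataset

ds = load_dataset("example/fsdp-test-changes", split="train")
for row in ds.select(range(3)):
    # Each row pairs a test function's source ("before"/"after") with its import
    # context, the commits it changed between, and the change type.
    print(row["library"], row["test_file"], row["test_function"], row["change_type"])
```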
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
__init__
def __init__(self, hidden_dim):
    super().__init__()
    self.param1 = nn.Parameter(
        torch.zeros(
            hidden_dim, hidden_dim, dtype=torch.float, device="cuda"
        )
    )
    self.param2 = nn.Parameter(
        torch.zeros(hidden_dim, dtype=torch.float, device="cuda")
    )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestSubmodule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
forward
def forward(self, x):
    if not fullgraph:
        torch._dynamo.graph_break()
    ret = torch.matmul(x, self.param1)
    ret = ret * self.param2
    ret = torch.relu(ret)
    return ret
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestSubmodule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
model_init_fn
def model_init_fn():
    torch.manual_seed(self.rank)
    fsdp_config = {}
    model = nn.Sequential(
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
        nn.ReLU(),
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
        nn.ReLU(),
        nn.Linear(hidden_dim, hidden_dim, device="cuda"),
    )
    fully_shard(model, reshard_after_forward=True, **fsdp_config)
    optim = torch.optim.SGD(model.parameters(), lr=1e-4)
    return model, optim
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
input_creation_fn
def input_creation_fn():
    torch.manual_seed(self.rank)
    inp = torch.randn((2, hidden_dim), device="cuda", requires_grad=False)
    return inp

return model_init_fn, input_creation_fn
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
test_multi_forward_mixed_requires_grad
def test_multi_forward_mixed_requires_grad(self):
    """
    Tests training parity with DDP when having trainable and frozen modules
    that participate multiple times in forward.
    """
    self.run_subtests(
        {"reshard_after_forward": [True, False, 2]},
        self._test_multi_forward_mixed_requires_grad,
    )
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardFrozen(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_frozen.py
__init__
def __init__(self, device: torch.device):
    super().__init__()
    self.layer_0 = nn.Linear(5, 5, device=device)
    self.layer_no_grad = nn.Linear(5, 5, device=device)
    self.layer_with_grad = nn.Linear(5, 5, device=device)
    self.layer_no_grad.requires_grad_(False)
import copy import functools import itertools from typing import List, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_param_group import ( RegisterPostBackwardFunction, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, MLP, patch_reduce_scatter, patch_register_post_backward_hook_backward, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class MultiForwardModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_grad_scaler.py
test_gradient_scaler
def test_gradient_scaler(self):
    self.run_subtests(
        {"has_inf": [True, False], "test_2d": [True, False]},
        self._test_gradient_scaler,
    )
import copy import torch import torch.nn as nn from torch.amp.grad_scaler import GradScaler, OptState from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor import init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm class TestFullyShardGradientScaler(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_grad_scaler.py
_test_gradient_scaler
def _test_gradient_scaler(self, has_inf: bool, test_2d: bool):
    torch.manual_seed(0)
    model = nn.Sequential(
        *[nn.Linear(4, 4, device="cuda", bias=False) for _ in range(2)]
    )
    for layer in model:
        fully_shard(layer)
    fully_shard(model)
    input = torch.randn([4, 4], device="cuda")
    if test_2d:
        mesh_2d = init_device_mesh(
            "cuda", (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
        )
        dp_mesh, tp_mesh = mesh_2d["dp"], mesh_2d["tp"]
        model = nn.Sequential(MLP(2), MLP(2), MLP(2))
        tp_parallelize_plan = {
            "0.in_proj": ColwiseParallel(),
            "0.out_proj": RowwiseParallel(),
            "1.in_proj": ColwiseParallel(),
            "1.out_proj": RowwiseParallel(),
            "2.in_proj": ColwiseParallel(),
            "2.out_proj": RowwiseParallel(),
        }
        model = parallelize_module(
            model,
            device_mesh=tp_mesh,
            parallelize_plan=tp_parallelize_plan,
        )
        for module in model:
            fully_shard(module, mesh=dp_mesh)
        fully_shard(model, mesh=dp_mesh)
        input = torch.randn((2,), device="cuda")
    loss = model(input).sum()
    scaler = GradScaler(init_scale=2.0, enabled=True)
    opt = torch.optim.Adam(model.parameters(), lr=1e-2)
    scaler.scale(loss).backward()
    inv_scale = scaler._scale.double().reciprocal().float()
    if (
        has_inf is True
        and opt.param_groups[0]["params"][0].grad._local_tensor.device.index == 1
    ):
        opt.param_groups[0]["params"][0].grad._local_tensor[0, 0].fill_(
            float("inf")
        )
    inital_grad = opt.param_groups[0]["params"][0].grad.to_local().clone()
    scaler.unscale_(opt)
    for found_inf in scaler._per_optimizer_states[id(opt)][
        "found_inf_per_device"
    ].values():
        self.assertEqual(found_inf, has_inf)
    self.assertEqual(
        scaler._per_optimizer_states[id(opt)]["stage"].value,
        OptState.UNSCALED.value,
    )
    unscaled_grad = opt.param_groups[0]["params"][0].grad.to_local().clone()
    self.assertEqual(unscaled_grad, inital_grad * inv_scale)
    initial_scale = scaler.get_scale()
    initial_state = copy.copy(opt.state)
    scaler.step(opt)
    steped_state = copy.copy(opt.state)
    if has_inf:
        # assert parameters are the same before/after
        self.assertEqual(steped_state, initial_state)
    else:
        # new parameters here if no inf found during .unscale_()
        self.assertNotEqual(steped_state.items(), initial_state.items())
    scaler.update()
    updated_scale = scaler.get_scale()
    if has_inf:
        # assert scale is updated
        backoff_factor = scaler.get_backoff_factor()
        self.assertEqual(updated_scale, initial_scale * backoff_factor)
    else:
        # scale is not updated
        self.assertEqual(updated_scale, initial_scale)
import copy import torch import torch.nn as nn from torch.amp.grad_scaler import GradScaler, OptState from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor import init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm class TestFullyShardGradientScaler(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_move_states_to_device_tensor
def test_move_states_to_device_tensor(self):
    model = MLP(8, torch.device("cpu"), with_buffer=True)
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        self.assertEqual(tensor.device, torch.device("cpu"))
    fully_shard(model)
    cuda_device = torch.device("cuda", torch.cuda.current_device())
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        self.assertEqual(tensor.device, cuda_device)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardDeviceTensor(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_move_states_to_device_dtensor_invalid
def test_move_states_to_device_dtensor_invalid(self):
    assert self.world_size >= 4, f"{self.world_size}"
    dp_size = 2
    global_cuda_mesh = init_device_mesh(
        "cuda", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp")
    )
    global_cpu_mesh = init_device_mesh(
        "cpu", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp")
    )
    dp_mesh = global_cuda_mesh["dp"]
    tp_mesh = global_cpu_mesh["tp"]  # mismatched meshes!
    model = MLP(8, torch.device("cpu"), with_buffer=True)
    parallelize_module(
        model,
        tp_mesh,
        {"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
    )
    for tensor in itertools.chain(model.parameters(), model.buffers()):
        self.assertEqual(tensor.device, torch.device("cpu"))
        if isinstance(tensor, DTensor):
            self.assertEqual(tensor._local_tensor.device, torch.device("cpu"))
    regex = r"Requires DTensor to have mesh of the same type as the FSDP mesh but got cpu for DTensor and cuda for FSDP"
    with self.assertRaisesRegex(ValueError, regex):
        fully_shard(model, mesh=dp_mesh)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardDeviceDTensor(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_invalid_mesh_ndim
def test_invalid_mesh_ndim(self):
    mesh = init_device_mesh("cuda", (self.world_size, 1, 1))
    model = MLP(8)
    regex = r"fully\_shard expects a 1D or 2D DeviceMesh but got DeviceMesh\('cuda', \[\[\[0\]\], \[\[1\]\]\]\)"
    with self.assertRaisesRegex(ValueError, regex):
        fully_shard(model, mesh=mesh)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardMeshArg(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
_assert_dtensor_params
def _assert_dtensor_params(self, params: Iterable[nn.Parameter]):
    self.assertGreater(len(list(params)), 0)
    for param in params:
        self.assertIsInstance(param, DTensor)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardRegisteredParams(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_to_float64_after_init
def test_to_float64_after_init(self):
    """Tests that the user can cast the module to float64 after init."""
    # NOTE: Test fp64 instead of a lower precision dtype like bf16 for
    # better numerics. The important part is changing the dtype.
    torch.manual_seed(42)
    mlp_dim, device, dtype = 4, torch.device("cuda"), torch.float64
    model = MLP(mlp_dim, device=device)
    for param in model.parameters():
        dist.broadcast(param, src=0)
    ref_model = copy.deepcopy(model).to(dtype)
    replicate(ref_model)
    ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
    for module in (model.in_proj, model.out_proj, model):
        fully_shard(module)
    model.to(dtype)
    for param in model.parameters():
        self.assertEqual(param.dtype, dtype)
    optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
    check_sharded_parity(self, ref_model, model)
    torch.manual_seed(42 + self.rank + 1)
    inp = torch.randn((2, mlp_dim), device="cuda", dtype=dtype)
    for iter_idx in range(10):
        losses: List[torch.Tensor] = []
        for _model in (ref_model, model):
            losses.append(_model(inp).sum())
            losses[-1].backward()
        self.assertEqual(losses[0], losses[1])
        check_sharded_parity(self, ref_model, model)
        for _optim in (ref_optim, optim):
            _optim.step()
            _optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardCastAfterInit(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_train_parity_single_group
def test_train_parity_single_group(self):
    """Tests train parity with DDP for a single FSDP group."""
    self.run_subtests(
        {
            "lin_shapes": [[(16, 15), (15, 8)], [(7, 15), (15, 3)]],
        },
        self._test_train_parity_single_group,
    )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShard1DTrainingCore(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_train_parity_multi_group_cpu_offload_eager
def test_train_parity_multi_group_cpu_offload_eager(self):
    """
    Tests train parity against DDP when using multiple parameter groups for
    communication and CPU offloading.
    """
    self.run_subtests(
        {
            "reshard_after_forward": [True],  # save CI time
            "offload_policy": [
                CPUOffloadPolicy(pin_memory=True),
                CPUOffloadPolicy(pin_memory=False),
            ],
            "device_type": ["cuda"],
            "delay_after_forward": [False, True],
            "delay_before_all_gather": [False, True],
            "delay_before_reduce_scatter": [False, True],
            "delay_before_optim": [False, True],
        },
        self._test_train_parity_multi_group,
    )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShard1DTrainingCore(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
delayed_all_gather
def delayed_all_gather(*args, **kwargs):
    torch.cuda._sleep(int(delay_in_ms * get_cycles_per_ms()))
    return orig_all_gather(*args, **kwargs)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
delayed_reduce_scatter
def delayed_reduce_scatter(*args, **kwargs):
    torch.cuda._sleep(int(delay_in_ms * get_cycles_per_ms()))
    return orig_reduce_scatter(*args, **kwargs)

torch.manual_seed(42 + self.rank + 1)
patch_all_gather_ctx = (
    patch_all_gather(delayed_all_gather)
    if delay_before_all_gather
    else contextlib.nullcontext()
)
patch_reduce_scatter_ctx = (
    patch_reduce_scatter(delayed_reduce_scatter)
    if delay_before_reduce_scatter
    else contextlib.nullcontext()
)
with patch_all_gather_ctx, patch_reduce_scatter_ctx:
    for iter_idx in range(10):
        inp = torch.randint(0, vocab_size, (3, 64), device=device_type)
        losses: List[torch.Tensor] = []
        for _model, _optim in ((ref_model, ref_optim), (model, optim)):
            _optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
            losses.append(_model(inp).sum())
            if _model is model and delay_after_forward:
                torch.cuda._sleep(int(delay_in_ms * get_cycles_per_ms()))
            losses[-1].backward()
            if _model is model and delay_before_optim:
                torch.cuda._sleep(int(delay_in_ms * get_cycles_per_ms()))
            _optim.step()
        self.assertEqual(losses[0], losses[1])
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_multi_forward_module
def test_multi_forward_module(self):
    """
    Tests parity with DDP when running a module that participates multiple
    times in forward.
    """
    self.run_subtests(
        {"reshard_after_forward": [True, False, 2]},
        self._test_multi_forward_module,
    )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShard1DTrainingCore(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_set_reduce_scatter_divide_factor
def test_set_reduce_scatter_divide_factor(self):
    self.run_subtests(
        {"divide_factor": [self.world_size * 2, self.world_size]},
        self._test_set_reduce_scatter_divide_factor,
    )
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardCommunication(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
_test_set_reduce_scatter_divide_factor
def _test_set_reduce_scatter_divide_factor(self, divide_factor: float):
    torch.manual_seed(42)
    model_args = ModelArgs(dropout_p=0.0, weight_tying=False)
    model = Transformer(model_args)
    ref_model = copy.deepcopy(model).cuda()
    ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
    for module in model.modules():
        if isinstance(module, TransformerBlock):
            fully_shard(module, reshard_after_forward=False)
    model = fully_shard(model, reshard_after_forward=False)
    optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
    model.set_reduce_scatter_divide_factor(divide_factor)
    torch.manual_seed(42 + self.rank)
    inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
    for iter_idx in range(10):
        ref_loss = ref_model(inp).sum()
        ref_loss.backward()
        for param in ref_model.parameters():
            param.grad.mul_(1.0 / divide_factor)
            dist.all_reduce(param.grad)
        loss = model(inp).sum()
        loss.backward()
        ref_optim.step()
        optim.step()
        ref_optim.zero_grad()
        optim.zero_grad()
        self.assertEqual(ref_loss, loss)
    check_sharded_parity(self, ref_model, model)
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardCommunication(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_fully_shard_backward_prefetch
def test_fully_shard_backward_prefetch(self):
    # Activation checkpointing should not affect the expected FSDP events
    self.run_subtests(
        {
            "reshard_after_forward": [True, False, 2],
            "checkpoint_impl": [None, "utils", "composable"],
        },
        self._test_backward_prefetch_forward_backward,
    )
    self.run_subtests(
        {
            "reshard_after_forward": [True, False, 2],
            "checkpoint_impl": [None, "utils", "composable"],
        },
        self._test_backward_prefetch_multi_forward,
    )
    self._test_backward_prefetch_unused_in_backward(True)
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardPrefetch(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_fully_shard_multi_module_unused_module
def test_fully_shard_multi_module_unused_module(self):
    class ModuleWithUnusedLinear(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.unused_lin = nn.Linear(1, 1)
            self.lin = nn.Linear(16, 16)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return nn.functional.relu(self.lin(x))

    model = nn.Sequential(
        ModuleWithUnusedLinear(), ModuleWithUnusedLinear(), nn.Linear(16, 16)
    )
    fully_shard([model[0].unused_lin, model[0].lin], reshard_after_forward=True)
    fully_shard([model[1].unused_lin, model[1].lin], reshard_after_forward=True)
    fully_shard(model)
    optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
    events: List[EventType] = []
    unshard_with_record = self._get_unshard_with_record(
        FSDPParamGroup.unshard, events
    )
    post_backward_with_record = self._get_post_backward_with_record(
        FSDPParamGroup.post_backward, events
    )
    inp = torch.randn((2, 16), device="cuda")
    with patch_unshard(unshard_with_record), patch_post_backward(
        post_backward_with_record
    ):
        for iter_idx in range(3):
            loss = model(inp)
            expected_events = [
                ("unshard", "", TrainingState.FORWARD),
                ("unshard", "0.unused_lin, 0.lin", TrainingState.FORWARD),
                ("unshard", "1.unused_lin, 1.lin", TrainingState.FORWARD),
            ]
            self.assertEqual(events, expected_events)
            events.clear()
            loss.sum().backward()
            expected_events = [
                # Since both `model[0]` and `model[1]` have unused modules
                # that never ran forward, they do not reshard after forward
                # despite setting it to `True`. Check that there are no
                # unshards in backward.
                (
                    "post_backward",
                    "1.unused_lin, 1.lin",
                    TrainingState.POST_BACKWARD,
                ),
                (
                    "post_backward",
                    "0.unused_lin, 0.lin",
                    TrainingState.POST_BACKWARD,
                ),
                ("post_backward", "", TrainingState.POST_BACKWARD),
            ]
            events.clear()
            optim.step()
            optim.zero_grad()
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardPrefetch(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
unshard_with_record
def unshard_with_record(self, *args, **kwargs):
    nonlocal events
    if (
        self._all_gather_result is None
        and self._sharded_state != ShardedState.UNSHARDED
    ):  # skip no-ops
        events.append(("unshard", self._module_fqn, self._training_state))
    return orig_unshard(self, *args, **kwargs)

return unshard_with_record
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState]
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
reshard_with_record
def reshard_with_record(self, *args, **kwargs):
    nonlocal events
    if (
        self._training_state == TrainingState.FORWARD
        and not self._reshard_after_forward
    ):  # skip no-ops
        return
    events.append(("reshard", self._module_fqn, self._training_state))
    return orig_reshard(self, *args, **kwargs)

return reshard_with_record
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState]
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
post_backward_with_record
def post_backward_with_record(self, *args, **kwargs):
    nonlocal events
    ret = orig_post_backward(self, *args, **kwargs)
    # Use training state after running post-backward to check that the
    # state is transitioned to `POST_BACKWARD` as expected
    events.append(("post_backward", self._module_fqn, self._training_state))
    return ret

return post_backward_with_record
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState]
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_raise_scalar_parameter
def test_raise_scalar_parameter(self):
    """Tests raising an exception when the model has scalar parameters."""
    model = nn.Sequential(*[MLP(3, dim_multiplier=3) for _ in range(3)])
    model.register_parameter("scalar_p", nn.Parameter(torch.tensor(1.0).cuda()))
    with self.assertRaisesRegex(
        ValueError, "Change scalar_p to a 1D tensor with numel equal to 1."
    ):
        fully_shard(model)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardShardedParameterTensor(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_shard_dtensor_parameters
def test_shard_dtensor_parameters(self):
    dp_size = 2 if self.world_size > 2 else 1
    global_mesh = init_device_mesh(
        "cuda", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp")
    )
    dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
    # Use odd dim sizes to test uneven shards
    # TODO: change "mlp_dim" back to 9 when uneven sharding
    # is supported for FSDP+TP
    model = MLP(8, dim_multiplier=3)
    orig_params = [param.detach().clone() for param in model.parameters()]
    orig_param_names = [param_name for param_name, _ in model.named_parameters()]
    parallelize_module(
        model,
        tp_mesh,
        {"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
    )
    fully_shard(model, mesh=dp_mesh)
    sharded_params = list(model.parameters())
    self.assertEqual(len(orig_params), len(sharded_params))
    for orig_param_name, orig_param, sharded_param in zip(
        orig_param_names, orig_params, sharded_params
    ):
        self.assertIsInstance(sharded_param, DTensor)
        self.assertEqual(sharded_param.device_mesh, global_mesh)
        self.assertEqual(sharded_param.size(), orig_param.size())
        self.assertEqual(sharded_param.stride(), orig_param.stride())
        if "in_proj" in orig_param_name:
            expected_placements = (
                _StridedShard(0, split_factor=tp_mesh.size()),
                Shard(0),
            )
        elif "out_proj" in orig_param_name and "weight" in orig_param_name:
            expected_placements = (Shard(0), Shard(1))
        else:
            expected_placements = (Shard(0), Replicate())
        self.assertEqual(sharded_param._spec.placements, expected_placements)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardShardedParameterDTensor(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
test_reset_sharded_param_in_lazy_init
def test_reset_sharded_param_in_lazy_init(self):
    class MyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.layer1 = nn.Linear(3, 3, bias=False)
            self.layer2 = nn.Linear(3, 3, bias=False)
            self.weight_norm = nn.Parameter(torch.empty(3))

        def init_weight_norm(self):
            with torch.no_grad():
                weight_norm = torch.linalg.norm(
                    self.layer1.weight, dim=1
                ) + torch.linalg.norm(self.layer2.weight, dim=1)
            model.weight_norm = nn.Parameter(weight_norm)

        def forward(self, inp: torch.Tensor) -> torch.Tensor:
            out = self.layer1(inp)
            out = self.layer2(out)
            return out.sum() + self.weight_norm.sum()

    with torch.device("meta"):
        model = MyModel()
    fully_shard(model.layer1)
    fully_shard(model.layer2)
    fully_shard(model)
    model.layer1.to_empty(device="cuda")
    model.layer2.to_empty(device="cuda")
    model.init_weight_norm()
    inp = torch.randn(3, 3, device="cuda")
    loss = model(inp).sum()
    loss.backward()
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardLazyInit(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
__init__
def __init__(self):
    super().__init__()
    self.layer1 = nn.Linear(3, 3, bias=False)
    self.layer2 = nn.Linear(3, 3, bias=False)
    self.weight_norm = nn.Parameter(torch.empty(3))
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_init.py
init_weight_norm
def init_weight_norm(self):
    with torch.no_grad():
        weight_norm = torch.linalg.norm(
            self.layer1.weight, dim=1
        ) + torch.linalg.norm(self.layer2.weight, dim=1)
    model.weight_norm = nn.Parameter(weight_norm)
import copy import itertools import unittest from typing import List import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import replicate from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_init import ( _get_managed_modules, _get_managed_states, ) from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos from torch.distributed._tensor import ( DeviceMesh, distribute_tensor, DTensor, Replicate, Shard, ) from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp._init_utils import ( _init_inter_node_process_group, _init_intra_node_process_group, ) from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.distributed.tensor.placement_types import _StridedShard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
test_norm_modules_fp16
def test_norm_modules_fp16(self):
    mp_policy = MixedPrecisionPolicy(param_dtype=torch.float16)
    self._test_norm_modules(mp_policy)
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionCasts(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
inner
def inner(model: nn.Module, x: torch.Tensor):
    # Run forward and backward to check for no type mismatch errors
    z = model(x)
    self.assertEqual(z.dtype, mp_policy.param_dtype)
    z.sum().backward()

# Layer norm
model = nn.Sequential(nn.Linear(32, 32), nn.LayerNorm(32), nn.Linear(32, 32))
for module in (model[0], model[1], model[2], model):
    fully_shard(module, mp_policy=mp_policy)
inner(model, torch.randn((4, 32)))

# Batch norm 1D
model = nn.Sequential(nn.Linear(32, 32), nn.BatchNorm1d(32), nn.Linear(32, 32))
for module in (model[0], model[1], model[2], model):
    fully_shard(module, mp_policy=mp_policy)
inner(model, torch.randn((4, 32)))

# Batch norm 2D: error in backward from buffer dtype mismatch
model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3))
for module in (model[0], model[1], model[2], model):
    fully_shard(module, mp_policy=mp_policy)
with self.assertRaisesRegex(RuntimeError, "Expected running_mean to have type"):
    # Errors in batch norm 2D backward
    inner(model, torch.randn((3, 1, 9, 9)))

# Batch norm 2D: cast buffers down to lower precision
model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3))
for module in (model[0], model[1], model[2], model):
    fully_shard(module, mp_policy=mp_policy)
# Casting batch norm buffers to the lower precision allows backward
model[1].running_mean = model[1].running_mean.to(mp_policy.param_dtype)
model[1].running_var = model[1].running_var.to(mp_policy.param_dtype)
inner(model, torch.randn((3, 1, 9, 9)))

# Batch norm 2D: use special mixed precision policy
model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3))
bn_mp_policy = MixedPrecisionPolicy(output_dtype=mp_policy.param_dtype)
fully_shard(model[1], mp_policy=bn_mp_policy)
for module in (model[0], model[2], model):
    fully_shard(module, mp_policy=mp_policy)
inner(model, torch.randn((3, 1, 9, 9)))
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
delay_collective
def delay_collective():
    # Share a stream so that all-gather and reduce-scatter block each
    # other like in `ProcessGroupNCCL`
    comm_stream.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(comm_stream):
        torch.cuda._sleep(int(comm_sleep_ms * get_cycles_per_ms()))
    torch.cuda.current_stream().wait_stream(comm_stream)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
delayed_all_gather
def delayed_all_gather(*args, **kwargs):
    delay_collective()
    return orig_all_gather_into_tensor(*args, **kwargs)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
delayed_reduce_scatter
def delayed_reduce_scatter(*args, **kwargs):
    delay_collective()
    return orig_reduce_scatter_tensor(*args, **kwargs)

inp = torch.randn((2, dim), device="cuda")
loss = model(inp).sum()  # warmup CUDA and allocator
loss.backward()
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
ref_fwd
def ref_fwd():
    with patch_all_gather(delayed_all_gather):
        # Run dummy all-gathers per weight (which is one FSDP group)
        for lin in ref_model:
            dummy_ag_output = torch.empty_like(lin.weight)
            dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[
                self.rank
            ]
            dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input)
        return ref_model(inp)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
fwd
def fwd():
    with patch_all_gather(delayed_all_gather):
        model(inp)

ref_fwd_time = self._time_fn(ref_fwd)
fwd_time = self._time_fn(fwd)
# Forward: only 1st all-gather is exposed
# NOTE: Do not enforce the expected forward time due to flakiness in CI
# expected_fwd_time = comm_sleep_ms + num_linears * compute_sleep_ms + buffer_ms
self.assertLessEqual(fwd_time, ref_fwd_time)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_is_fallback_op_in_snodes
def _is_fallback_op_in_snodes(snodes, op): return any(is_fallback_op(snode.node, op) for snode in snodes) class TestFullyShardCompileCompute(FSDPTest): @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @skip_if_lt_x_gpu(2) def test_disable_compiling_hooks(self): self.run_subtests( { "skip_fsdp_hooks": [False, True], }, self._test_disable_compiling_hooks, ) def _test_disable_compiling_hooks( self, skip_fsdp_hooks: bool, ): torch._dynamo.reset() trace_rules_check_count = 0 HOOKS_FILE_NAME = "torch/distributed/_composable/fsdp/_fsdp_state.py" HOOK_WRAPPER_NAME = "fsdp_hook_wrapper" def patched_trace_rules_check(*args, **kwargs): nonlocal trace_rules_check_count f_code = args[0] if ( hasattr(f_code, "co_filename") and f_code.co_filename.endswith(HOOKS_FILE_NAME) and f_code.co_name != HOOK_WRAPPER_NAME ): trace_rules_check_count += 1 return orig_trace_rules_check(*args, **kwargs) original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks orig_trace_rules_check = torch._dynamo.trace_rules.check torch.distributed.barrier() torch._dynamo.config.skip_fsdp_hooks = skip_fsdp_hooks torch._dynamo.trace_rules.check = patched_trace_rules_check model = MLP(4) fully_shard(model) model.compile() model(torch.randn((4, 4), device="cuda")) torch.distributed.barrier() torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks torch._dynamo.trace_rules.check = orig_trace_rules_check if skip_fsdp_hooks: self.assertEqual(trace_rules_check_count, 0) else: self.assertTrue(trace_rules_check_count > 0) class TestFullyShardCompile(FSDPTest): fake_pg = not at_least_x_gpu(2) @property def world_size(self) -> int: return 2 def test_dynamo_trace_use_training_state(self): torch._dynamo.reset() # Construct a dummy FSDPParamGroup, since we just want to test the `use_training_state` ctx manager. 
param_group = FSDPParamGroup( [], # params: List[nn.Parameter], (torch.nn.Linear(1, 1),), # module: Tuple[nn.Module, ...], None, # mesh_info: FSDPMeshInfo, None, # post_forward_mesh_info: Optional[FSDPMeshInfo], None, # device: torch.device, None, # mp_policy: MixedPrecisionPolicy, None, # offload_policy: OffloadPolicy, ) def f(x): param_group._training_state = TrainingState.IDLE with param_group.use_training_state(TrainingState.FORWARD): if param_group._training_state == TrainingState.FORWARD: return x + 1 else: return x inp = torch.zeros(1) self.assertEqual(param_group._training_state, TrainingState.IDLE) eager_out = f(inp) self.assertEqual(param_group._training_state, TrainingState.IDLE) self.assertEqual(eager_out, inp + 1) cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") compiled_out = torch.compile(f, backend=cnt, fullgraph=True)(inp) self.assertEqual(param_group._training_state, TrainingState.IDLE) self.assertEqual(eager_out, compiled_out) self.assertEqual(cnt.frame_count, 1) self.assertEqual(cnt.op_count, 1) self.assertEqual(len(cnt.graphs), 1) def test_trace_fsdp_set_(self): @torch.library.custom_op("mylib::add_one_out", mutates_args={"out"}) def add_one_out(x: torch.Tensor, out: torch.Tensor) -> None: torch.add(x, 1, out=out) def f(x): buf = torch.zeros(2) buf_view = buf.view(-1) torch.ops.mylib.add_one_out(x, out=buf_view) buf_view2 = buf.view(-1) torch.ops.fsdp.set_(x, buf_view2) ref_x = torch.zeros(2) x = copy.deepcopy(ref_x) f(ref_x) torch.compile(f, backend="aot_eager")(x) self.assertEqual(x, ref_x) def _reinplace_all_gather_with_optional_checks(self, fullgraph): def _run_with_checks(graph, orig_fn): self.assertTrue( _is_op_in_graph( graph, torch.ops._c10d_functional.all_gather_into_tensor.default, ) ) orig_fn(graph) self.assertFalse( _is_op_in_graph( graph, torch.ops._c10d_functional.all_gather_into_tensor.default, ) ) self.assertTrue( _is_op_in_graph( graph, torch.ops._c10d_functional.all_gather_into_tensor_out.default, ) ) if fullgraph: return mock.patch.object( comms, "reinplace_fsdp_all_gather", functools.partial( _run_with_checks, orig_fn=comms.reinplace_fsdp_all_gather, ), ) else: return contextlib.nullcontext() def _is_fwd_graph(self, snodes): ag_copy_in_snode = None for snode in snodes: if is_fallback_op(snode.node, torch.ops.fsdp.all_gather_copy_in.default): ag_copy_in_snode = snode break self.assertTrue(ag_copy_in_snode is not None) if any( dep.name.startswith("primals_") for dep in ag_copy_in_snode.read_writes.reads ): return True else: return False def _maybe_run_decide_global_ordering_of_comms_with_checks(self, fullgraph): def _check_fsdp_ops_in_snodes(snodes, is_fwd_graph, expect=True): assert_method = self.assertTrue if expect else self.assertFalse common_ops = { torch.ops.fsdp.all_gather_copy_in.default, torch.ops._c10d_functional.all_gather_into_tensor_out.default, torch.ops.fsdp.split_with_sizes_copy.default, } bwd_only_ops = { torch.ops.fsdp.chunk_cat.default, torch.ops._c10d_functional.reduce_scatter_tensor.default, } for op in common_ops: assert_method( _is_fallback_op_in_snodes( snodes, op, ), msg=f"{op}", ) if not is_fwd_graph: for op in bwd_only_ops: assert_method( _is_fallback_op_in_snodes( snodes, op, ), msg=f"{op}", ) def _decide_global_ordering_of_comms_with_checks( snodes, name_to_buf, name_to_fused_node, orig_fn ): is_fwd_graph = self._is_fwd_graph(snodes) _check_fsdp_ops_in_snodes(snodes, is_fwd_graph, expect=True) new_snodes = orig_fn(snodes, name_to_buf, name_to_fused_node) _check_fsdp_ops_in_snodes(new_snodes, is_fwd_graph, 
expect=False) return new_snodes if fullgraph: return mock.patch.object( comms, "decide_global_ordering_of_comms", functools.partial( _decide_global_ordering_of_comms_with_checks, orig_fn=comms.decide_global_ordering_of_comms, ), ) else: return contextlib.nullcontext() def inductor_code_check_no_compute_op(self, file_check): return ( file_check.check_not(" = aten.") .check_not(" = extern_kernels.") .check_not(" = triton_") .check_not(" = torch.ops.") .check_not(" = inductor_ops.") .check_not(" aten.") .check_not(" extern_kernels.") .check_not(" triton_") .check_not(" torch.ops.") .check_not(" inductor_ops.") ) def inductor_code_check_fsdp_all_gather( self, file_check, overlapped_compute_op_str, num_resize, num_set, last_all_gather=False, ): file_check = file_check.check("torch.ops.fsdp.all_gather_copy_in.") file_check = self.inductor_code_check_no_compute_op(file_check) file_check = file_check.check( "torch.ops._c10d_functional.all_gather_into_tensor_out." ) # Checks that AGWait is delayed, making the AG overlap with some compute op. if overlapped_compute_op_str is not None: file_check = file_check.check(f"{overlapped_compute_op_str}") file_check = file_check.check_count( "inductor_ops.resize_storage_bytes_(", num_resize, exactly=True ) file_check = file_check.check("torch.ops._c10d_functional.wait_tensor.") file_check = self.inductor_code_check_no_compute_op(file_check) file_check = file_check.check("torch.ops.fsdp.split_with_sizes_copy.") file_check = self.inductor_code_check_no_compute_op(file_check) file_check = file_check.check_count( "torch.ops.aten.set_.", num_set, exactly=True ) if not last_all_gather: # Checks that there is no compute op between this AGWait and next AG. file_check = self.inductor_code_check_no_compute_op(file_check) return file_check def inductor_code_check_fsdp_reduce_scatter( self, file_check, overlapped_compute_op_str ): file_check = file_check.check("torch.ops.fsdp.chunk_cat.") file_check = self.inductor_code_check_no_compute_op(file_check) file_check = file_check.check( "torch.ops._c10d_functional.reduce_scatter_tensor." ) # Checks that RSWait is delayed, making the RS overlap with some compute op. if overlapped_compute_op_str is not None: file_check = file_check.check(f"{overlapped_compute_op_str}") file_check = file_check.check("torch.ops._c10d_functional.wait_tensor.") return file_check @torch._dynamo.config.patch( inline_inbuilt_nn_modules=True, skip_fsdp_hooks=False, ) @torch._functorch.config.patch(recompute_views=True) @torch._functorch.config.patch(cse=False) @torch._inductor.config.patch( reorder_for_compute_comm_overlap=True, reorder_for_compute_comm_overlap_passes=[ "sink_waits", "raise_comms", "reorder_compute_for_overlap", ], ) def _test_traceable_fsdp( self, model_init_fn, input_creation_fn, backend, fullgraph ): def compiler_fn(compiled_autograd_backend): def _fn(gm): # fullgraph=True because graph-break in Compiled Autograd BWD graph is not supported by Traceable FSDP2 yet # (main difficulty comes from queue_callback not working well when BWD has graph break). 
return torch.compile( gm, backend=compiled_autograd_backend, fullgraph=True ) return _fn def run_iters(model, optim, n_iter=10, compiled_autograd_backend=None): torch.manual_seed(42) losses = [] for i in range(n_iter): inp = input_creation_fn() if compiled_autograd_backend is not None: maybe_compiled_autograd_ctx = compiled_autograd.enable( compiler_fn(compiled_autograd_backend) ) else: maybe_compiled_autograd_ctx = contextlib.nullcontext() with maybe_compiled_autograd_ctx: out = model(inp) loss = out.sum() losses.append(loss.item()) loss.backward() optim.step() optim.zero_grad(set_to_none=True) return losses def test_compiled(): model, optim = model_init_fn() # FSDP2 does lazy init using 1st run, so run it once to init using eager mode run_iters(model, optim, n_iter=1) model_compiled = torch.compile(model, backend=backend, fullgraph=fullgraph) res = run_iters(model_compiled, optim, compiled_autograd_backend=backend) return res def test_eager(): model, optim = model_init_fn() # FSDP2 does lazy init using 1st run, so run it once to init using eager mode run_iters(model, optim, n_iter=1) res = run_iters(model, optim) return res losses_compiled = test_compiled() losses_eager = test_eager() if not self.fake_pg: for loss_compiled, loss_eager in zip(losses_compiled, losses_eager): self.assertTrue( torch.allclose( torch.tensor(loss_compiled), torch.tensor(loss_eager), rtol=1e-5, atol=1e-8, ), f"{loss_compiled} vs {loss_eager}", ) def _create_simple_mlp_factory_fns(self): hidden_dim = 16 def model_init_fn(): torch.manual_seed(self.rank) fsdp_config = {} model = nn.Sequential( nn.Linear(hidden_dim, hidden_dim, device="cuda"), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim, device="cuda"), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim, device="cuda"), ) fully_shard(model, reshard_after_forward=True, **fsdp_config) optim = torch.optim.SGD(model.parameters(), lr=1e-4) return model, optim def input_creation_fn(): torch.manual_seed(self.rank) inp = torch.randn((2, hidden_dim), device="cuda", requires_grad=False) return inp return model_init_fn, input_creation_fn @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_simple_mlp_fullgraph_backend_aot_eager(self): self._test_traceable_fsdp( *self._create_simple_mlp_factory_fns(), "aot_eager", fullgraph=True ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_simple_mlp_fullgraph_backend_aot_eager_decomp_partition(self): self._test_traceable_fsdp( *self._create_simple_mlp_factory_fns(), "aot_eager_decomp_partition", fullgraph=True, ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_simple_mlp_fullgraph_backend_inductor(self): self._test_traceable_fsdp( *self._create_simple_mlp_factory_fns(), "inductor", fullgraph=True ) def _create_nested_fully_shard_factory_fns(self, fullgraph): hidden_dim = 16 class TestSubmodule(nn.Module): def __init__(self, hidden_dim): super().__init__() self.param1 = nn.Parameter( torch.zeros( hidden_dim, hidden_dim, dtype=torch.float, device="cuda" ) ) self.param2 = nn.Parameter( torch.zeros(hidden_dim, dtype=torch.float, device="cuda") ) def forward(self, x): if not fullgraph: torch._dynamo.graph_break() ret = torch.matmul(x, self.param1) ret = ret * self.param2 ret = torch.relu(ret) return ret class TestModule(nn.Module): def __init__(self, n_layers): super().__init__() self.layers = torch.nn.ModuleList() for layer_id in range(n_layers): 
self.layers.append(TestSubmodule(hidden_dim)) def forward(self, x): # Intentionally reusing all layers a few times, # to test "multiple all-gathers for the same parameter" case. for layer in self.layers: x = layer(x) for layer in self.layers: x = layer(x) for layer in self.layers: x = layer(x) return x def model_init_fn(): torch.manual_seed(self.rank) fsdp_config = {} mesh = init_device_mesh("cuda", (self.world_size,)) model = TestModule(n_layers=3) for layer_id, mod in enumerate(model.layers): fully_shard(mod, mesh=mesh, reshard_after_forward=True, **fsdp_config) model = fully_shard( model, mesh=mesh, reshard_after_forward=True, **fsdp_config ) optim = torch.optim.SGD(model.parameters(), lr=1e-4) return model, optim def input_creation_fn(): torch.manual_seed(self.rank) inp = torch.randn((2, hidden_dim), device="cuda", requires_grad=False) return inp return model_init_fn, input_creation_fn @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_nested_fully_shard_backend_aot_eager(self): for fullgraph in [True, False]: self._test_traceable_fsdp( *self._create_nested_fully_shard_factory_fns(fullgraph=fullgraph), "aot_eager", fullgraph=fullgraph, ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_nested_fully_shard_backend_aot_eager_decomp_partition(self): for fullgraph in [True, False]: self._test_traceable_fsdp( *self._create_nested_fully_shard_factory_fns(fullgraph=fullgraph), "aot_eager_decomp_partition", fullgraph=fullgraph, ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_nested_fully_shard_backend_inductor(self): for fullgraph in [True, False]: with self._reinplace_all_gather_with_optional_checks( fullgraph ), self._maybe_run_decide_global_ordering_of_comms_with_checks(fullgraph): _, triton_codes = run_and_get_code( lambda: self._test_traceable_fsdp( *self._create_nested_fully_shard_factory_fns( fullgraph=fullgraph ), "inductor", fullgraph=fullgraph, ) ) if fullgraph: self.assertTrue( len(triton_codes) == 2, "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph", ) fwd_code = triton_codes[0] file_check = FileCheck().check("def call(args):") for fwd_ag_block_info in [ dict(overlapped_compute_op_str=None, num_resize=0, num_set=2), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=2, num_set=2, last_all_gather=True, ), ]: file_check = self.inductor_code_check_fsdp_all_gather( file_check, **fwd_ag_block_info ) file_check.run(fwd_code) bwd_code = triton_codes[1] file_check = FileCheck().check("def call(args):") for bwd_ag_block_info in [ dict(overlapped_compute_op_str=None, num_resize=0, num_set=2), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=0, num_set=2, ), dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=0, num_set=2, last_all_gather=True, ), ]: file_check = 
self.inductor_code_check_fsdp_all_gather( file_check, **bwd_ag_block_info ) for bwd_rs_block_info in [ dict(overlapped_compute_op_str="extern_kernels.mm("), dict( overlapped_compute_op_str=None ), # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None dict(overlapped_compute_op_str=None), ]: file_check = self.inductor_code_check_fsdp_reduce_scatter( file_check, **bwd_rs_block_info ) file_check.run(bwd_code) else: # TODO: when fullgraph=False and there is graph break in FWD graph, # there are several recompiles, need to figure out why. self.assertTrue( len(triton_codes) > 2, "Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph", ) def _create_transformer_factory_fns(self): seq_len = 16 vocab_size = 8 def model_init_fn(): torch.manual_seed(self.rank) fsdp_config = {} mesh = init_device_mesh("cuda", (self.world_size,)) model_args = ModelArgs( vocab_size=vocab_size, n_layers=3, ) model = Transformer(model_args) for layer_id, mod in enumerate(model.layers): fully_shard(mod, mesh=mesh, reshard_after_forward=True, **fsdp_config) model = fully_shard( model, mesh=mesh, reshard_after_forward=True, **fsdp_config ) optim = torch.optim.SGD(model.parameters(), lr=1e-4) return model, optim def input_creation_fn(): torch.manual_seed(self.rank) inp = torch.randint( 0, vocab_size, (2, seq_len), device="cuda", requires_grad=False ) return inp return model_init_fn, input_creation_fn def _maybe_add_graph_break_to_sdpa(self, fullgraph): def _sdpa_with_graph_break(orig_fn, fullgraph, *args, **kwargs): if not fullgraph: torch._dynamo.graph_break() return orig_fn(*args, **kwargs) return mock.patch.object( F, "scaled_dot_product_attention", functools.partial( _sdpa_with_graph_break, F.scaled_dot_product_attention, fullgraph, ), ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_transformer_backend_aot_eager(self): for fullgraph in [True, False]: with self._maybe_add_graph_break_to_sdpa( fullgraph ), self._reinplace_all_gather_with_optional_checks(fullgraph): self._test_traceable_fsdp( *self._create_transformer_factory_fns(), "aot_eager", fullgraph=fullgraph, ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO: native_dropout has worse accuracy after decomp, need to figure out why @torch._inductor.config.patch(fallback_random=True) def test_transformer_backend_aot_eager_decomp_partition(self): for fullgraph in [True, False]: with self._maybe_add_graph_break_to_sdpa(fullgraph): self._test_traceable_fsdp( *self._create_transformer_factory_fns(), "aot_eager_decomp_partition", fullgraph=fullgraph, ) @skipIfRocm @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO: native_dropout causes CUDA IMA error, need to figure out why @torch._inductor.config.patch(fallback_random=True) def test_transformer_backend_inductor(self): for fullgraph in [True, False]: with self._maybe_add_graph_break_to_sdpa( fullgraph ), self._reinplace_all_gather_with_optional_checks( fullgraph ), self._maybe_run_decide_global_ordering_of_comms_with_checks( fullgraph ): _, triton_codes = run_and_get_code( lambda: self._test_traceable_fsdp( *self._create_transformer_factory_fns(), "inductor", fullgraph=fullgraph, ) ) if fullgraph: self.assertTrue( len(triton_codes) == 2, "Expected two separate lowerings to Triton code, one from FWD graph and one from Compiled Autograd BWD graph", ) fwd_code = triton_codes[0] file_check = 
FileCheck().check("def call(args):") for fwd_ag_block_info in [ dict(overlapped_compute_op_str="triton_", num_resize=0, num_set=4), dict( overlapped_compute_op_str="aten.native_dropout.", num_resize=0, num_set=12, ), dict( overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.", num_resize=12, num_set=12, ), dict( overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention.", num_resize=12, num_set=12, last_all_gather=True, ), ]: file_check = self.inductor_code_check_fsdp_all_gather( file_check, **fwd_ag_block_info ) file_check.run(fwd_code) bwd_code = triton_codes[1] file_check = FileCheck().check("def call(args):") for bwd_ag_block_info in [ dict( overlapped_compute_op_str="extern_kernels.mm(", num_resize=0, num_set=12, ), dict( overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.", num_resize=0, num_set=12, ), dict( overlapped_compute_op_str="aten._scaled_dot_product_efficient_attention_backward.", num_resize=0, num_set=12, last_all_gather=True, ), ]: file_check = self.inductor_code_check_fsdp_all_gather( file_check, **bwd_ag_block_info ) for bwd_rs_block_info in [ dict(overlapped_compute_op_str="extern_kernels.mm("), dict( overlapped_compute_op_str=None ), # TODO: improve compute/comm overlap, so that `overlapped_compute_op_str` is not None dict(overlapped_compute_op_str=None), dict(overlapped_compute_op_str=None), ]: file_check = self.inductor_code_check_fsdp_reduce_scatter( file_check, **bwd_rs_block_info ) file_check.run(bwd_code) else: # TODO: when fullgraph=False and there is graph break in FWD graph, # there are several recompiles, need to figure out why. self.assertTrue( len(triton_codes) > 2, "Expected at least 3 separate lowerings to Triton code, which means at least 1 graph break in FWD graph", ) if __name__ == "__main__": run_tests()
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_disable_compiling_hooks
def test_disable_compiling_hooks(self):
    self.run_subtests(
        {
            "skip_fsdp_hooks": [False, True],
        },
        self._test_disable_compiling_hooks,
    )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompileCompute(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
patched_trace_rules_check
def patched_trace_rules_check(*args, **kwargs):
    nonlocal trace_rules_check_count
    f_code = args[0]
    if (
        hasattr(f_code, "co_filename")
        and f_code.co_filename.endswith(HOOKS_FILE_NAME)
        and f_code.co_name != HOOK_WRAPPER_NAME
    ):
        trace_rules_check_count += 1
    return orig_trace_rules_check(*args, **kwargs)

original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks
orig_trace_rules_check = torch._dynamo.trace_rules.check
torch.distributed.barrier()
torch._dynamo.config.skip_fsdp_hooks = skip_fsdp_hooks
torch._dynamo.trace_rules.check = patched_trace_rules_check
model = MLP(4)
fully_shard(model)
model.compile()
model(torch.randn((4, 4), device="cuda"))
torch.distributed.barrier()
torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks
torch._dynamo.trace_rules.check = orig_trace_rules_check
if skip_fsdp_hooks:
    self.assertEqual(trace_rules_check_count, 0)
else:
    self.assertTrue(trace_rules_check_count > 0)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_dynamo_trace_use_training_state
def test_dynamo_trace_use_training_state(self):
    torch._dynamo.reset()
    # Construct a dummy FSDPParamGroup, since we just want to test the `use_training_state` ctx manager.
    param_group = FSDPParamGroup(
        [],  # params: List[nn.Parameter],
        (torch.nn.Linear(1, 1),),  # module: Tuple[nn.Module, ...],
        None,  # mesh_info: FSDPMeshInfo,
        None,  # post_forward_mesh_info: Optional[FSDPMeshInfo],
        None,  # device: torch.device,
        None,  # mp_policy: MixedPrecisionPolicy,
        None,  # offload_policy: OffloadPolicy,
    )

    def f(x):
        param_group._training_state = TrainingState.IDLE
        with param_group.use_training_state(TrainingState.FORWARD):
            if param_group._training_state == TrainingState.FORWARD:
                return x + 1
            else:
                return x

    inp = torch.zeros(1)
    self.assertEqual(param_group._training_state, TrainingState.IDLE)

    eager_out = f(inp)
    self.assertEqual(param_group._training_state, TrainingState.IDLE)
    self.assertEqual(eager_out, inp + 1)

    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    compiled_out = torch.compile(f, backend=cnt, fullgraph=True)(inp)
    self.assertEqual(param_group._training_state, TrainingState.IDLE)
    self.assertEqual(eager_out, compiled_out)
    self.assertEqual(cnt.frame_count, 1)
    self.assertEqual(cnt.op_count, 1)
    self.assertEqual(len(cnt.graphs), 1)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
f
def f(x):
    param_group._training_state = TrainingState.IDLE
    with param_group.use_training_state(TrainingState.FORWARD):
        if param_group._training_state == TrainingState.FORWARD:
            return x + 1
        else:
            return x

inp = torch.zeros(1)
self.assertEqual(param_group._training_state, TrainingState.IDLE)

eager_out = f(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, inp + 1)

cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
compiled_out = torch.compile(f, backend=cnt, fullgraph=True)(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, compiled_out)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 1)
self.assertEqual(len(cnt.graphs), 1)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
test_trace_fsdp_set_
def test_trace_fsdp_set_(self):
    @torch.library.custom_op("mylib::add_one_out", mutates_args={"out"})
    def add_one_out(x: torch.Tensor, out: torch.Tensor) -> None:
        torch.add(x, 1, out=out)

    def f(x):
        buf = torch.zeros(2)
        buf_view = buf.view(-1)
        torch.ops.mylib.add_one_out(x, out=buf_view)
        buf_view2 = buf.view(-1)
        torch.ops.fsdp.set_(x, buf_view2)

    ref_x = torch.zeros(2)
    x = copy.deepcopy(ref_x)
    f(ref_x)
    torch.compile(f, backend="aot_eager")(x)
    self.assertEqual(x, ref_x)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
f
def f(x):
    param_group._training_state = TrainingState.IDLE
    with param_group.use_training_state(TrainingState.FORWARD):
        if param_group._training_state == TrainingState.FORWARD:
            return x + 1
        else:
            return x

inp = torch.zeros(1)
self.assertEqual(param_group._training_state, TrainingState.IDLE)

eager_out = f(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, inp + 1)

cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
compiled_out = torch.compile(f, backend=cnt, fullgraph=True)(inp)
self.assertEqual(param_group._training_state, TrainingState.IDLE)
self.assertEqual(eager_out, compiled_out)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 1)
self.assertEqual(len(cnt.graphs), 1)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_run_with_checks
def _run_with_checks(graph, orig_fn):
    self.assertTrue(
        _is_op_in_graph(
            graph,
            torch.ops._c10d_functional.all_gather_into_tensor.default,
        )
    )
    orig_fn(graph)
    self.assertFalse(
        _is_op_in_graph(
            graph,
            torch.ops._c10d_functional.all_gather_into_tensor.default,
        )
    )
    self.assertTrue(
        _is_op_in_graph(
            graph,
            torch.ops._c10d_functional.all_gather_into_tensor_out.default,
        )
    )

if fullgraph:
    return mock.patch.object(
        comms,
        "reinplace_fsdp_all_gather",
        functools.partial(
            _run_with_checks,
            orig_fn=comms.reinplace_fsdp_all_gather,
        ),
    )
else:
    return contextlib.nullcontext()
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_is_fwd_graph
def _is_fwd_graph(self, snodes):
    ag_copy_in_snode = None
    for snode in snodes:
        if is_fallback_op(snode.node, torch.ops.fsdp.all_gather_copy_in.default):
            ag_copy_in_snode = snode
            break
    self.assertTrue(ag_copy_in_snode is not None)
    if any(
        dep.name.startswith("primals_")
        for dep in ag_copy_in_snode.read_writes.reads
    ):
        return True
    else:
        return False
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton class TestFullyShardCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_check_fsdp_ops_in_snodes
def _check_fsdp_ops_in_snodes(snodes, is_fwd_graph, expect=True):
    assert_method = self.assertTrue if expect else self.assertFalse
    common_ops = {
        torch.ops.fsdp.all_gather_copy_in.default,
        torch.ops._c10d_functional.all_gather_into_tensor_out.default,
        torch.ops.fsdp.split_with_sizes_copy.default,
    }
    bwd_only_ops = {
        torch.ops.fsdp.chunk_cat.default,
        torch.ops._c10d_functional.reduce_scatter_tensor.default,
    }
    for op in common_ops:
        assert_method(
            _is_fallback_op_in_snodes(
                snodes,
                op,
            ),
            msg=f"{op}",
        )
    if not is_fwd_graph:
        for op in bwd_only_ops:
            assert_method(
                _is_fallback_op_in_snodes(
                    snodes,
                    op,
                ),
                msg=f"{op}",
            )
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
test_reduce_dtype
def test_reduce_dtype(self):
    self.run_subtests(
        {"reshard_after_forward": [False, True, 2]},
        self._test_reduce_dtype_fp32_reduce,
    )
    self.run_subtests(
        {"reshard_after_forward": [False, True, 2]},
        self._test_reduce_dtype_bf16_reduce,
    )
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
_test_reduce_dtype_fp32_reduce
def _test_reduce_dtype_fp32_reduce(self, reshard_after_forward: Union[bool, int]):
    param_dtype, reduce_dtype = torch.bfloat16, torch.float32
    ref_model, ref_optim, model, optim = self._init_models_and_optims(
        reshard_after_forward, param_dtype=param_dtype, reduce_dtype=reduce_dtype
    )
    ref_model_bf16 = copy.deepcopy(ref_model).to(param_dtype)
    orig_reduce_scatter = dist.reduce_scatter_tensor

    def assert_fn(output: torch.Tensor):
        self.assertEqual(output.dtype, reduce_dtype)

    reduce_scatter = functools.partial(
        reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
    )
    torch.manual_seed(42 + self.rank + 1)
    inp = torch.randn((4, 16), device="cuda", dtype=param_dtype)
    for iter_idx in range(10):
        optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
        fsdp_loss = model(inp).sum()
        with patch_reduce_scatter(reduce_scatter):
            fsdp_loss.backward()
        optim.step()

        ref_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
        ref_loss = ref_model_bf16(inp.to(param_dtype)).sum()
        ref_loss.backward()
        for param in ref_model_bf16.parameters():
            param.grad.data = param.grad.to(torch.float32)
            dist.all_reduce(param.grad)  # fp32 reduction
            param.grad.div_(self.world_size)
        for param_fp32, param_bf16 in zip(
            ref_model.parameters(), ref_model_bf16.parameters()
        ):
            param_fp32.grad = param_bf16.grad
            param_bf16.grad = None
        ref_optim.step()  # fp32 optimizer step
        for param_fp32, param_bf16 in zip(
            ref_model.parameters(), ref_model_bf16.parameters()
        ):
            param_bf16.detach().copy_(param_fp32)

        self.assertEqual(fsdp_loss, ref_loss)
        check_sharded_parity(self, ref_model, model)
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
assert_fn
def assert_fn(output: torch.Tensor):
    self.assertEqual(output.dtype, param_dtype)

reduce_scatter = functools.partial(
    reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
)
predivide_factor, postdivide_factor = _get_gradient_divide_factors(
    self.process_group, all_reduce_group=None, reduce_dtype=param_dtype
)
torch.manual_seed(42 + self.rank + 1)
inp = torch.randn((4, 16), device="cuda", dtype=param_dtype)
for iter_idx in range(10):
    optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
    fsdp_loss = model(inp).sum()
    with patch_reduce_scatter(reduce_scatter):
        fsdp_loss.backward()
    optim.step()

    ref_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
    ref_loss = ref_model_bf16(inp.to(param_dtype)).sum()
    ref_loss.backward()
    for param in ref_model_bf16.parameters():
        # Use reduce-scatter -> all-gather as all-reduce because for
        # world size >=4, NCCL all-reduce shows numeric differences
        # compared with NCCL reduce-scatter
        if predivide_factor is not None and predivide_factor > 1:
            param.grad.div_(predivide_factor)
        elif predivide_factor is None:
            param.grad.div_(self.world_size)
        output = torch.zeros_like(torch.chunk(param.grad, self.world_size)[0])
        dist.reduce_scatter_tensor(output, param.grad)
        dist.all_gather_into_tensor(param.grad, output)
        if postdivide_factor is not None and postdivide_factor > 1:
            param.grad.div_(postdivide_factor)
    for param_fp32, param_bf16 in zip(
        ref_model.parameters(), ref_model_bf16.parameters()
    ):
        param_fp32.grad = param_bf16.grad.to(param_fp32.dtype)
        param_bf16.grad = None
    ref_optim.step()  # fp32 optimizer step
    for param_fp32, param_bf16 in zip(
        ref_model.parameters(), ref_model_bf16.parameters()
    ):
        param_bf16.detach().copy_(param_fp32)

    self.assertEqual(fsdp_loss, ref_loss)
    check_sharded_parity(self, ref_model, model)
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
test_grad_acc_with_reduce_dtype
def test_grad_acc_with_reduce_dtype(self): """ Tests that gradient accumulation without reduce-scatter when using bf16 compute and fp32 reduction accumulates the unsharded gradients in fp32. """ self.run_subtests( {"reshard_after_forward": [True, False]}, self._test_grad_acc_with_reduce_dtype, )
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
_test_grad_acc_with_reduce_dtype
def _test_grad_acc_with_reduce_dtype(self, reshard_after_forward: bool): torch.manual_seed(42) param_dtype, reduce_dtype = (torch.bfloat16, torch.float32) mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype ) model = nn.Sequential(*[MLP(16, torch.device("cpu")) for _ in range(3)]) # To emulate the mixed precision implementation where forward/backward # compute use bf16 and optimizer uses fp32, we maintain both an fp32 # and a bf16 copy of the reference model ref_model = copy.deepcopy(model).cuda() ref_model_compute = copy.deepcopy(ref_model).to(param_dtype) ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2) for mlp in model: fully_shard( mlp, reshard_after_forward=reshard_after_forward, mp_policy=mp_policy ) fully_shard( model, reshard_after_forward=reshard_after_forward, mp_policy=mp_policy ) optim = torch.optim.Adam(model.parameters(), lr=1e-2) orig_reduce_scatter = dist.reduce_scatter_tensor def assert_fn(output: torch.Tensor): self.assertEqual(output.dtype, reduce_dtype) reduce_scatter = functools.partial( reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn ) torch.manual_seed(42 + self.rank + 1) device = torch.device("cuda") # Train on the same input to avoid loss explosion num_microbatches = 4 inp = torch.randn((2 * num_microbatches, 16), device=device, dtype=param_dtype) for iter_idx in range(10): microbatch_inps = torch.chunk(inp, 4) for microbatch_idx in range(num_microbatches): is_last_microbatch = microbatch_idx == num_microbatches - 1 model.set_requires_gradient_sync(is_last_microbatch) model.set_reshard_after_backward( is_last_microbatch or reshard_after_forward ) losses: List[torch.Tensor] = [] for _model in (ref_model_compute, model): losses.append( _model(microbatch_inps[microbatch_idx].detach()).sum() ) self.assertEqual(losses[-1].dtype, param_dtype) with patch_reduce_scatter(reduce_scatter): losses[-1].backward() self.assertEqual(losses[0], losses[1]) # Manually accumulate gradients into the base reference model # from the compute reference model in fp32 for ref_param, ref_param_compute in zip( ref_model.parameters(), ref_model_compute.parameters() ): self.assertTrue(ref_param_compute.grad is not None) self.assertEqual(ref_param.dtype, torch.float32) if ref_param.grad is not None: ref_param.grad += ref_param_compute.grad else: ref_param.grad = ref_param_compute.grad.to(ref_param.dtype) ref_param_compute.grad = None # Manually reduce gradients for the reference model on the last # microbatch to implement data parallelism if is_last_microbatch: for ref_param in ref_model.parameters(): self.assertTrue(ref_param.grad is not None) dist.all_reduce(ref_param.grad) ref_param.grad /= self.world_size check_sharded_parity(self, ref_model, model) ref_optim.step() optim.step() ref_optim.zero_grad(set_to_none=(iter_idx % 2 == 0)) optim.zero_grad(set_to_none=(iter_idx % 2 == 0)) # Manually copy parameters from the base reference model to the # compute reference model to run the optimizer step for the latter for ref_param, ref_param_compute in zip( ref_model.parameters(), ref_model_compute.parameters() ): ref_param_compute.detach().copy_(ref_param)
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
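The gradient-accumulation record above toggles set_requires_gradient_sync and set_reshard_after_backward per microbatch so that reduce-scatter only runs on the final microbatch. A self-contained sketch of that pattern, assuming distributed is already initialized; the model, sizes, and microbatch count are illustrative:

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard

model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 16)).cuda()
fully_shard(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)

num_microbatches = 4
inp = torch.randn(2 * num_microbatches, 16, device="cuda")
for i, microbatch in enumerate(torch.chunk(inp, num_microbatches)):
    is_last = i == num_microbatches - 1
    model.set_requires_gradient_sync(is_last)   # reduce-scatter only on the last microbatch
    model.set_reshard_after_backward(is_last)   # keep params unsharded in between
    model(microbatch).sum().backward()
optim.step()
optim.zero_grad()

With a fp32 reduce_dtype configured (not shown here), the skipped reductions accumulate the unsharded gradients in fp32, which is what the test's docstring calls out.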
torch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
test_submodules_with_external_inputs
def test_submodules_with_external_inputs(self): self.run_subtests( {"enable_submodule_cast": [False, True]}, self._test_submodules_with_external_inputs, )
import copy import functools from typing import Dict, List, Optional, Union import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy from torch.distributed._composable.fsdp._fsdp_collectives import ( _get_gradient_divide_factors, ) from torch.testing._internal.common_distributed import ( requires_nccl_version, SaveForwardInputsModel, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, patch_reduce_scatter, reduce_scatter_with_assert, ) from torch.testing._internal.common_utils import run_tests class TestFullyShardMixedPrecisionCasts(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_unshard_async
def test_unshard_async(self): class ReduceModule(nn.Module): def __init__(self, dim: int, mesh: DeviceMesh): super().__init__() self.mesh = mesh self.weight = nn.Parameter(torch.randn(dim, dim)) def forward(self, x: torch.Tensor): y = F.relu(x @ self.weight) # NOTE: This all-reduce is not differentiable and is included # to exercise the overlap. work = dist.all_reduce(y, group=self.mesh.get_group(), async_op=True) return y, work class MLPs(nn.Module): def __init__(self, dim: int): super().__init__() self.mlp1 = MLP(dim) self.mlp2 = MLP(dim) self.mlp3 = MLP(dim) def forward(self, ys: List[torch.Tensor], works: List[dist.Work]): (y1, y2, y3), (work1, work2, work3) = ys, works work1.wait() z1 = self.mlp1(y1) work2.wait() z2 = self.mlp2(y2) work3.wait() z3 = self.mlp3(y3) return z1 + z2 + z3 class ReduceModel(nn.Module): def __init__(self, dim: int, mesh: DeviceMesh): super().__init__() self.reduce_module1 = ReduceModule(dim, mesh) self.reduce_module2 = ReduceModule(dim, mesh) self.reduce_module3 = ReduceModule(dim, mesh) self.mlps = MLPs(dim) def forward(self, x: torch.Tensor): y1, work1 = self.reduce_module1(x) if isinstance(self.mlps.mlp1, FSDPModule): self.mlps.mlp1.unshard(async_op=True) y2, work2 = self.reduce_module2(x) if isinstance(self.mlps.mlp2, FSDPModule): self.mlps.mlp2.unshard(async_op=True) y3, work3 = self.reduce_module3(x) if isinstance(self.mlps.mlp3, FSDPModule): self.mlps.mlp3.unshard(async_op=True) return self.mlps([y1, y2, y3], [work1, work2, work3]) mesh = init_device_mesh("cuda", (self.world_size,)) batch_size, dim = 2, 8 torch.manual_seed(42) ref_model = replicate(ReduceModel(dim, mesh).cuda()) ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2) torch.manual_seed(42) model = ReduceModel(dim, mesh) fully_shard(model.mlps.mlp1, reshard_after_forward=False) fully_shard(model.mlps.mlp2, reshard_after_forward=False) fully_shard(model.mlps.mlp3, reshard_after_forward=False) fully_shard(model.mlps) replicate(model.cuda()) optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True) torch.manual_seed(42 + self.rank + 1) inp = torch.randn((batch_size, dim), device="cuda") for _ in range(10): losses: List[torch.Tensor] = [] for _model, _optim in ((ref_model, ref_optim), (model, optim)): losses.append(_model(inp).sum()) losses[-1].backward() with implicit_replication(): _optim.step() _optim.zero_grad() self.assertEqual(losses[0], losses[1])
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardUnshardMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
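test_unshard_async above drives explicit prefetching with FSDPModule.unshard(async_op=True): the all-gather for a later FSDP module is launched early and its forward waits on the in-flight collective, as exercised in the test. A reduced sketch of the same idea, assuming distributed is initialized; the block names and sizes are illustrative:

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard

block1 = nn.Linear(8, 8).cuda()
block2 = nn.Linear(8, 8).cuda()
fully_shard(block1, reshard_after_forward=False)
fully_shard(block2, reshard_after_forward=False)

x = torch.randn(2, 8, device="cuda")
block2.unshard(async_op=True)  # launch block2's all-gather early
y = block1(x)                  # overlaps with the pending all-gather
z = block2(y)                  # forward waits on the pending unshard before computing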
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
__init__
def __init__(self) -> None: super().__init__() self.unused_lin = nn.Linear(1, 1) self.lin = nn.Linear(16, 16)
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class ModuleWithUnusedLinear(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
forward
def forward(self, x: torch.Tensor) -> torch.Tensor: return nn.functional.relu(self.lin(x))
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class ModuleWithUnusedLinear(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
test_unshard_without_lazy_init
def test_unshard_without_lazy_init(self): torch.manual_seed(42) model = MLP(4) for param in model.parameters(): dist.broadcast(param, src=0) ref_model = copy.deepcopy(model) fully_shard(model) model.unshard() # no lazy init yet for ref_param, param in zip(ref_model.parameters(), model.parameters()): self.assertEqual(ref_param, param)
import copy import functools import itertools import unittest from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( FSDPModule, fully_shard, MixedPrecisionPolicy, OffloadPolicy, ) from torch.distributed._composable.fsdp._fsdp_collectives import ( _div_if_needed, _get_gradient_divide_factors, foreach_all_gather, foreach_all_gather_copy_out, foreach_reduce, ) from torch.distributed._composable.fsdp._fsdp_common import FSDPMeshInfo, TrainingState from torch.distributed._composable.fsdp._fsdp_init import ( _get_post_forward_mesh_info, _init_default_fully_shard_mesh, ) from torch.distributed._composable.fsdp._fsdp_param import ShardedState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import DTensor from torch.distributed._tensor.experimental import implicit_replication from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, DoubleLinear, FSDPTest, FSDPTestMultiThread, MLP, patch_post_backward, patch_reshard, patch_unshard, ) from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d EventType = Tuple[str, str, TrainingState] class TestFullyShardUnshardMultiThread(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_compile.py
_is_op_in_graph
def _is_op_in_graph(graph, op): return any(node.target is op for node in graph.nodes)
import contextlib import copy import functools import unittest from unittest import mock import torch import torch._dynamo.testing import torch.distributed._composable.fsdp._fsdp_param import torch.nn.functional as F from torch import nn from torch._dynamo import compiled_autograd from torch._inductor import comms from torch._inductor.utils import is_fallback_op, run_and_get_code from torch.distributed._composable.fsdp import fully_shard from torch.distributed._composable.fsdp._fsdp_common import TrainingState from torch.distributed._composable.fsdp._fsdp_param_group import FSDPParamGroup from torch.distributed._tensor import init_device_mesh from torch.testing import FileCheck from torch.testing._internal.common_distributed import at_least_x_gpu, skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, MLP from torch.testing._internal.common_utils import run_tests, skipIfRocm from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, ) from torch.utils._triton import has_triton
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
test_hsdp_tp_state_dict_save_load
def test_hsdp_tp_state_dict_save_load(self): global_mesh = init_device_mesh( "cuda", (2, 2, self.world_size // 4), mesh_dim_names=("dp_replicate", "dp_shard", "tp"), ) self.run_subtests( {"mlp_dim": [4, 6, 8, 10]}, functools.partial(self._test_hsdp_tp_state_dict_save_load, global_mesh), )
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
_test_hsdp_tp_state_dict_save_load
def _test_hsdp_tp_state_dict_save_load(self, global_mesh: DeviceMesh, mlp_dim: int): dp_mesh, tp_mesh = global_mesh["dp_replicate", "dp_shard"], global_mesh["tp"] torch.manual_seed(42) model = nn.Sequential(*[MLP(mlp_dim) for _ in range(3)]) model = parallelize_module( model, device_mesh=tp_mesh, parallelize_plan={ "0.in_proj": ColwiseParallel(), "0.out_proj": RowwiseParallel(), "1.in_proj": ColwiseParallel(), "1.out_proj": RowwiseParallel(), "2.in_proj": ColwiseParallel(), "2.out_proj": RowwiseParallel(), }, ) for mlp in model: fully_shard(mlp, mesh=dp_mesh) fully_shard(model, mesh=dp_mesh) self._test_state_dict_save_load(model)
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
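The dp/tp state-dict records compose tensor parallelism (ColwiseParallel/RowwiseParallel on the tp mesh dim) with fully_shard on the dp mesh dim. A condensed sketch of that composition, assuming world_size is divisible by tp_size and distributed is initialized; the plan keys and layer sizes are illustrative:

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    parallelize_module,
    RowwiseParallel,
)

tp_size = 2
global_mesh = init_device_mesh(
    "cuda", (dist.get_world_size() // tp_size, tp_size), mesh_dim_names=("dp", "tp")
)
model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8)).cuda()
parallelize_module(
    model,
    device_mesh=global_mesh["tp"],
    parallelize_plan={"0": ColwiseParallel(), "1": RowwiseParallel()},
)
fully_shard(model, mesh=global_mesh["dp"])  # FSDP over the dp dim of the 2D mesh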
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
_test_state_dict_save_load
def _test_state_dict_save_load(self, model: nn.Module): for param_name, param in model.named_parameters(): self.assertIsInstance( param, DTensor, f"Expects parameters to be sharded as DTensors but got {param_name} " f"as {type(param)}: {param}", ) old_fill_value = 1 new_fill_value = 42 + self.rank with torch.no_grad(): for param in model.parameters(): param.fill_(old_fill_value) # Use that the parameters are currently sharded, meaning that their # data pointers correspond to the sharded parameter data param_name_to_data_ptr = { n: p.to_local().data_ptr() for n, p in model.named_parameters() } ref_sharded_sizes = [p.size() for p in model.parameters()] state_dict = model.state_dict() for param, ref_sharded_size in zip(model.parameters(), ref_sharded_sizes): self.assertEqual(param.size(), ref_sharded_size) self.assertTrue(isinstance(param, nn.Parameter)) # Verify that keys match, values are DTensors, and values share the # same storage as the existing sharded parameter data self.assertEqual(set(state_dict.keys()), set(param_name_to_data_ptr.keys())) for param_name, tensor in state_dict.items(): self.assertTrue(isinstance(tensor, DTensor)) if param_name_to_data_ptr[param_name] == 0: # Check that this is padding (added by DTensor) self.assertGreater(self.rank, 0) self.assertEqual(torch.count_nonzero(tensor.to_local()).item(), 0) else: self.assertEqual( tensor.to_local().data_ptr(), param_name_to_data_ptr[param_name] ) # Verify that we can load a new state dict that contains DTensors with # storages different from the current model parameters new_state_dict: Dict[str, DTensor] = {} for param_name, dtensor in state_dict.items(): # Construct new DTensors to exercise load state dict writeback new_state_dict[param_name] = dtensor.detach().clone().fill_(new_fill_value) for param in model.parameters(): self.assertEqual( param.to_local(), torch.ones_like(param.to_local()) * old_fill_value, ) model.load_state_dict(new_state_dict) for param_name, param in model.named_parameters(): self.assertEqual( param.to_local(), torch.ones_like(param.to_local()) * new_fill_value, ) local_param = param.to_local() # Only guarantee that the local tensor's data pointer does not # change if the sharding was even (i.e. no padding); otherwise, # FSDP may re-pad the local tensor, changing its data pointer if local_param.size(0) * param.device_mesh.size() == param.size(0): self.assertEqual( local_param.data_ptr(), param_name_to_data_ptr[param_name] )
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
test_rank0_offload_full_state_dict
def test_rank0_offload_full_state_dict(self): # Construct a reference unsharded model on all ranks model_args = ModelArgs(dropout_p=0.0) torch.manual_seed(42) ref_model = Transformer(model_args).cuda() for param in ref_model.parameters(): torch.distributed.broadcast(param.detach(), src=0) # Construct a sharded model and sharded state dict on all ranks model = copy.deepcopy(ref_model) for module in model.modules(): if isinstance(module, TransformerBlock): fully_shard(module) fully_shard(model) sharded_sd = model.state_dict() # Save a reference CPU full state dict on rank 0 and delete the # reference model otherwise if self.rank != 0: del ref_model else: ref_gpu_full_sd = ref_model.state_dict() ref_full_sd = {k: v.cpu() for k, v in ref_gpu_full_sd.items()} del ref_gpu_full_sd # Reshard the GPU sharded state dict to a CPU full state dict on rank 0 full_sd = {} for param_name, sharded_param in sharded_sd.items(): full_param = sharded_param.full_tensor() if self.rank == 0: full_sd[param_name] = full_param.cpu() else: del full_param # Check that we have a CPU full state dict only on rank 0 if self.rank == 0: self.assertEqual(len(full_sd), len(ref_full_sd)) self.assertEqual(list(full_sd.keys()), list(ref_full_sd.keys())) for (param_name, param), ref_param in zip( full_sd.items(), ref_full_sd.values() ): self.assertEqual(param.device, torch.device("cpu")) self.assertEqual(param.device, ref_param.device) self.assertEqual(param, ref_param) else: self.assertEqual(len(full_sd), 0)
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiThread(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
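test_rank0_offload_full_state_dict reshards each DTensor in the sharded state dict into a CPU full state dict that only rank 0 keeps. The core loop, reduced to a sketch (assumes distributed is initialized; the model is illustrative):

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard

model = nn.Linear(16, 16).cuda()
fully_shard(model)

full_sd = {}
for name, sharded_param in model.state_dict().items():
    full_param = sharded_param.full_tensor()  # all-gather the DTensor shards
    if dist.get_rank() == 0:
        full_sd[name] = full_param.cpu()      # keep the only full copy on CPU
    else:
        del full_param                        # free the unsharded copy elsewhere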
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_root_move_forward_input_to_device
def test_root_move_forward_input_to_device(self): device = torch.device("cuda", 0) class ParamlessModule(nn.Module): def forward(self, x: torch.Tensor, ys: Tuple[torch.Tensor, ...]): # Check that FSDP moved the inputs to GPU, including recursing # into the tuple data structure assert x.device == device, f"Expects {device} but got {x.device}" assert ( ys[0].device == device ), f"Expects {device} but got {ys[0].device}" assert ( ys[1].device == device ), f"Expects {device} but got {ys[1].device}" y = ys[0] + ys[1] return x + y + 1 model = ParamlessModule() fully_shard(model) x = torch.randn((3,)) ys = (torch.randn((3,)), torch.randn((3,))) self.assertEqual(x.device, torch.device("cpu")) self.assertEqual(ys[0].device, torch.device("cpu")) self.assertEqual(ys[1].device, torch.device("cpu")) model(x, ys)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardForwardInputs(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
forward
def forward(self, x: torch.Tensor, ys: Tuple[torch.Tensor, ...]): # Check that FSDP moved the inputs to GPU, including recursing # into the tuple data structure assert x.device == device, f"Expects {device} but got {x.device}" assert ( ys[0].device == device ), f"Expects {device} but got {ys[0].device}" assert ( ys[1].device == device ), f"Expects {device} but got {ys[1].device}" y = ys[0] + ys[1] return x + y + 1
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class ParamlessModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_param_registration_after_backward
def test_param_registration_after_backward(self): """Tests the parameter registration after backward.""" device = torch.device("cuda", 0) # Single FSDP group for reshard_after_forward in (True, False, 2): model = MLP(8, device) fully_shard(model, reshard_after_forward=reshard_after_forward) # root only inp = torch.randn((2, 8), device="cuda") self._assert_dtensor_params(model.parameters()) model(inp).sum().backward() self._assert_dtensor_params(model.parameters()) # Multiple FSDP groups for reshard_after_forward in (True, False, 2): model = MLP(8, device) fully_shard(model.in_proj, reshard_after_forward=reshard_after_forward) fully_shard(model.out_proj, reshard_after_forward=reshard_after_forward) fully_shard(model, reshard_after_forward=reshard_after_forward) self._assert_dtensor_params(model.parameters()) model(inp).sum().backward() self._assert_dtensor_params(model.parameters())
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardRegisteredParams(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
_assert_tensor_params
def _assert_tensor_params(self, params: Iterable[nn.Parameter]): self.assertGreater(len(list(params)), 0) for param in params: self.assertNotIsInstance(param, DTensor) self.assertIsInstance(param, torch.Tensor)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardRegisteredParams(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_state.py
test_fully_shard_deepcopy
def test_fully_shard_deepcopy(self): model = MLP(8) fully_shard(model) with self.assertRaisesRegex(AssertionError, "FSDP does not support deepcopy"): copy.deepcopy(model)
import copy import unittest import torch.nn as nn from torch.distributed._composable.fsdp import FSDPModule, fully_shard from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests class TestFullyShardState(FSDPTestMultiThread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
test_dp_state_dict_save_load
def test_dp_state_dict_save_load(self): fsdp_mesh = init_device_mesh("cuda", (self.world_size,)) self.run_subtests( {"mlp_dim": [2, 3, 4, 5], "mesh": [fsdp_mesh]}, self._test_dp_state_dict_save_load, ) if self.world_size % 2 != 0: return hsdp_mesh = init_device_mesh("cuda", (self.world_size // 2, 2)) self.run_subtests( {"mlp_dim": [2, 3, 4, 5], "mesh": [hsdp_mesh]}, self._test_dp_state_dict_save_load, )
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
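The hsdp_mesh branch of test_dp_state_dict_save_load passes a 2D mesh to fully_shard, which gives HSDP: replication over the first mesh dim and sharding over the second. A minimal sketch, assuming an even world size and an initialized process group; the mesh dim names are illustrative (the test builds its mesh without names):

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard
from torch.distributed.device_mesh import init_device_mesh

hsdp_mesh = init_device_mesh(
    "cuda",
    (dist.get_world_size() // 2, 2),
    mesh_dim_names=("dp_replicate", "dp_shard"),
)
model = nn.Linear(16, 16).cuda()
fully_shard(model, mesh=hsdp_mesh)  # replicate over dim 0, shard over dim 1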
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
test_dp_state_dict_cpu_offload
def test_dp_state_dict_cpu_offload(self): mlp_dim = 4 offload_policy = CPUOffloadPolicy(pin_memory=True) torch.manual_seed(42) with torch.device("meta"): model = nn.Sequential( nn.Linear(mlp_dim, mlp_dim, bias=False), nn.Linear(mlp_dim, mlp_dim, bias=False), ) for module in model: fully_shard(module, offload_policy=offload_policy) fully_shard(model, offload_policy=offload_policy) # split full sd into multiple pieces # to test loading with `strict=False` state_dicts = [] for name, dtensor in model.named_parameters(): full_tensor = torch.randn(dtensor.size()) sharded_tensor = distribute_tensor( full_tensor, dtensor.device_mesh, dtensor.placements ) state_dicts.append({name: sharded_tensor}) # check that we can load with some parameters still on meta device for sd in state_dicts: model.load_state_dict(sd, assign=True, strict=False) # lazy init without error inp = torch.rand((mlp_dim, mlp_dim), device="cuda") model(inp) state_dict = model.state_dict() for name, dtensor in state_dict.items(): self.assertEqual(dtensor.device.type, "cpu")
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
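test_dp_state_dict_cpu_offload combines meta-device construction, CPUOffloadPolicy, and a piecewise load with assign=True, strict=False. A trimmed sketch of that flow, assuming distributed is initialized; the random full tensors stand in for checkpoint values and the sizes are illustrative:

import torch
import torch.nn as nn
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard
from torch.distributed._tensor import distribute_tensor

offload = CPUOffloadPolicy(pin_memory=True)
with torch.device("meta"):
    model = nn.Sequential(nn.Linear(4, 4, bias=False), nn.Linear(4, 4, bias=False))
for layer in model:
    fully_shard(layer, offload_policy=offload)
fully_shard(model, offload_policy=offload)

# Materialize real storage by assigning sharded DTensors built from full tensors.
for name, dparam in model.named_parameters():
    full = torch.randn(dparam.size())  # stand-in for a loaded checkpoint tensor
    sharded = distribute_tensor(full, dparam.device_mesh, dparam.placements)
    model.load_state_dict({name: sharded}, assign=True, strict=False)

model(torch.rand(4, 4, device="cuda"))  # lazy init; state_dict tensors stay on CPU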
torch
test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
test_dp_tp_state_dict_save_load
def test_dp_tp_state_dict_save_load(self): dp_size = 2 global_mesh = init_device_mesh( "cuda", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp") ) self.run_subtests( {"mlp_dim": [4, 6, 8, 10]}, functools.partial(self._test_dp_tp_state_dict_save_load, global_mesh), )
import copy import functools import unittest from typing import Dict import torch import torch.nn as nn from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, FSDPTestMultiThread, MLP from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) class TestFullyShardStateDictMultiProcess(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
ref_fwd_bwd
def ref_fwd_bwd(): with patch_all_gather(delayed_all_gather): # Run dummy all-gathers per weight (which is one FSDP group) for lin in ref_model: dummy_ag_output = torch.empty_like(lin.weight) dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[ self.rank ] dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input) loss = ref_model(inp).sum() # Run dummy all-gathers per weight again since we are # resharding after forward for lin in ref_model: dummy_ag_output = torch.empty_like(lin.weight) dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[ self.rank ] dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input) loss.backward() # Run dummy reduce-scatters per weight for lin in ref_model: dummy_rs_input = torch.empty_like(lin.weight) dummy_rs_output = torch.chunk(dummy_rs_input, self.world_size)[ self.rank ] dist.reduce_scatter_tensor(dummy_rs_output, dummy_rs_input)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
fwd_bwd
def fwd_bwd(): with patch_all_gather(delayed_all_gather), patch_reduce_scatter( delayed_reduce_scatter ): loss = model(inp).sum() loss.backward() ref_fwd_bwd_time = self._time_fn(ref_fwd_bwd) fwd_bwd_time = self._time_fn(fwd_bwd) # Backward: only 1st all-gather and last reduce-scatter are exposed; # double the backward compute since computing two gradients per layer # NOTE: Do not enforce the expected forward-backward time due to # flakiness in CI # expected_bwd_time = ( # comm_sleep_ms * 2 + num_linears * 2 * compute_sleep_ms + buffer_ms * 2 # ) self.assertLessEqual(fwd_bwd_time, ref_fwd_bwd_time)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
test_fully_shard_post_optim_event_overlap
def test_fully_shard_post_optim_event_overlap(self): torch.manual_seed(42) # Use non-trivial comm. time but still shorter than compute time dim, compute_sleep_ms, comm_sleep_ms = (4, 25, 10) # Define the model to have a high-compute linear followed by a # low-compute linear, where only the low-compute linear uses FSDP model = nn.Sequential( LinearWithSleep(dim, compute_sleep_ms), nn.Linear(dim, dim) ).cuda() fully_shard(model[1], reshard_after_forward=False) optim = torch.optim.AdamW(model.parameters(), lr=1e-2) orig_all_gather_into_tensor = dist.all_gather_into_tensor def delayed_all_gather(*args, **kwargs): torch.cuda._sleep(int(comm_sleep_ms * get_cycles_per_ms())) return orig_all_gather_into_tensor(*args, **kwargs) inp = torch.randn((2, dim), device="cuda") def run_train_steps(num_iters: int, use_post_optim_event: bool): for _ in range(num_iters): optim.zero_grad() with patch_all_gather(delayed_all_gather): loss = model(inp).sum() loss.backward() with implicit_replication(): optim.step() if use_post_optim_event: post_optim_event = torch.cuda.current_stream().record_event() model[1].set_post_optim_event(post_optim_event) run_train_steps(1, False) # warmup CUDA and allocator num_iters = 5 baseline_time = self._time_fn( functools.partial(run_train_steps, num_iters, False) ) test_time = self._time_fn(functools.partial(run_train_steps, num_iters, True)) buffer_ms = 4 # CPU delays and copies # Baseline: FSDP all-gather is exposed since the FSDP module waits for # the current stream and hence the high-compute linear self.assertLessEqual( baseline_time, num_iters * (3 * compute_sleep_ms + comm_sleep_ms + buffer_ms), ) # Test: FSDP all-gather is overlapped with the high-compute linear # since the FSDP module only waits for the post-optim event (except on # the 1st iteration when no event has been recorded) expected_test_time = ( num_iters * (3 * compute_sleep_ms + buffer_ms) + comm_sleep_ms ) self.assertLessEqual(test_time, expected_test_time) # Since `get_cycles_per_ms` uses lru cache, there may be some variance # between the initially determined cycles vs. the current cycles per # ms, so we relax the baseline check to just that it is greater than # the test time rather than the expected test time self.assertGreater(baseline_time, test_time)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests class TestFullyShardOverlap(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
delayed_all_gather
def delayed_all_gather(*args, **kwargs): delay_collective() return orig_all_gather_into_tensor(*args, **kwargs)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
run_train_steps
def run_train_steps(num_iters: int, use_post_optim_event: bool): for _ in range(num_iters): optim.zero_grad() with patch_all_gather(delayed_all_gather): loss = model(inp).sum() loss.backward() with implicit_replication(): optim.step() if use_post_optim_event: post_optim_event = torch.cuda.current_stream().record_event() model[1].set_post_optim_event(post_optim_event) run_train_steps(1, False) # warmup CUDA and allocator num_iters = 5 baseline_time = self._time_fn( functools.partial(run_train_steps, num_iters, False) ) test_time = self._time_fn(functools.partial(run_train_steps, num_iters, True)) buffer_ms = 4 # CPU delays and copies # Baseline: FSDP all-gather is exposed since the FSDP module waits for # the current stream and hence the high-compute linear self.assertLessEqual( baseline_time, num_iters * (3 * compute_sleep_ms + comm_sleep_ms + buffer_ms), ) # Test: FSDP all-gather is overlapped with the high-compute linear # since the FSDP module only waits for the post-optim event (except on # the 1st iteration when no event has been recorded) expected_test_time = ( num_iters * (3 * compute_sleep_ms + buffer_ms) + comm_sleep_ms ) self.assertLessEqual(test_time, expected_test_time) # Since `get_cycles_per_ms` uses lru cache, there may be some variance # between the initially determined cycles vs. the current cycles per # ms, so we relax the baseline check to just that it is greater than # the test time rather than the expected test time self.assertGreater(baseline_time, test_time)
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
_time_fn
def _time_fn(self, fn: Callable): start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) dist.barrier() torch.cuda.synchronize() start_event.record() fn() end_event.record() torch.cuda.synchronize() elapsed_time = start_event.elapsed_time(end_event) return elapsed_time
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests class TestFullyShardOverlap(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
backward
def backward(ctx, grad_output: torch.Tensor): (input, weight) = ctx.saved_tensors torch.cuda._sleep(int(2 * ctx.sleep_ms * get_cycles_per_ms())) grad_input = grad_output @ weight.T grad_weight = input.T @ grad_output return grad_input, grad_weight, None
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests class Matmul(torch.autograd.Function):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
__init__
def __init__(self, dim: int, sleep_ms: int): super().__init__() self.weight = nn.Parameter(torch.randn((dim, dim))) self.sleep_ms = sleep_ms
import copy import functools from typing import Callable import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable.fsdp import fully_shard from torch.distributed._tensor.experimental import implicit_replication from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( FSDPTest, patch_all_gather, patch_reduce_scatter, ) from torch.testing._internal.common_utils import get_cycles_per_ms, run_tests class LinearWithSleep(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
__init__
def __init__(self, device: torch.device): super().__init__() self.inner = nn.Linear(4, 4, device=device) self.outer = nn.Linear(4, 5, device=device)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class MultiForwardModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
forward
def forward(self, x: torch.Tensor, ys: Tuple[torch.Tensor, ...]): # Check that FSDP moved the inputs to GPU, including recursing # into the tuple data structure assert x.device == device, f"Expects {device} but got {x.device}" assert ( ys[0].device == device ), f"Expects {device} but got {ys[0].device}" assert ( ys[1].device == device ), f"Expects {device} but got {ys[1].device}" y = ys[0] + ys[1] return x + y + 1
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class ParamlessModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_post_optim_event
def test_post_optim_event(self): torch.manual_seed(42) model_args = ModelArgs(dropout_p=0.0) model = Transformer(model_args) ref_model = replicate(copy.deepcopy(model).cuda()) ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2) for layer in itertools.chain(model.layers, [model]): fully_shard(layer) optim = torch.optim.AdamW(model.parameters(), lr=1e-2) def step_post_hook( fsdp_module: FSDPModule, opt: torch.optim.Optimizer, args, kwargs ) -> None: post_optim_event = torch.cuda.current_stream().record_event() fsdp_module.set_post_optim_event(post_optim_event) optim.register_step_post_hook(functools.partial(step_post_hook, model)) torch.manual_seed(42 + self.rank) inp = torch.randint(0, model_args.vocab_size, (2, 8), device="cuda") # Track all losses and check for equality at the end to avoid a CPU # sync point after each iteration ref_losses: List[torch.Tensor] = [] losses: List[torch.Tensor] = [] for iter_idx in range(10): ref_optim.zero_grad() ref_losses.append(ref_model(inp).sum()) ref_losses[-1].backward() ref_optim.step() for iter_idx in range(10): optim.zero_grad() losses.append(model(inp).sum()) losses[-1].backward() optim.step() # Sleep after the optimizer step to allow CPU to run ahead into the # next iteration's forward, exercising the post-optim stream sync torch.cuda._sleep(int(25 * get_cycles_per_ms())) for ref_loss, loss in zip(ref_losses, losses): self.assertEqual(ref_loss, loss)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShard1DTrainingCore(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_train_parity_with_activation_checkpointing
def test_train_parity_with_activation_checkpointing(self): """ Tests train parity against DDP when composing with activation checkpointing. """ self.run_subtests( { "reshard_after_forward": [True, False], "checkpoint_impl": ["composable", "utils", "wrapper"], "module_grouping": ["block", "mem_eff", "mem_eff_weight_tied"], }, self._test_train_parity_with_activation_checkpointing, )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShard1DTrainingCompose(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_train_parity_with_shared_params
def test_train_parity_with_shared_params(self): self.run_subtests( { "reshard_after_forward": [False, True], "use_activation_checkpointing": [False, True], }, self._test_train_shared_params, )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardSharedParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_gradient_accumulation
def test_gradient_accumulation(self): """ Tests gradient accumulation with/without gradient reduction and with/without resharding after backward. """ meshes = [init_device_mesh("cuda", (self.world_size,))] # always test FSDP if self.world_size == 4: # test HSDP too if enough GPUs shard_size, replicate_size = 2, 2 meshes.append(init_device_mesh("cuda", (replicate_size, shard_size))) self.run_subtests( { "mesh": meshes, "reshard_after_forward": [True, False, 2], # "all": disable reduce-scatter for all modules # "root_only": disable reduce-scatter for root's linear only # "some_mlps": disable reduce-scatter for some MLPs "mode": ["all", "root_only", "some_mlps"], "reshard_after_backward": [False, True], "offload_policy": [OffloadPolicy(), CPUOffloadPolicy()], # For HSDP only: # `True`: reduce-scatter only (no all-reduce) each microbatch # until the last microbatch # `False`: neither reduce-scatter nor all-reduce each # microbatch until the last microbatch "reduce_scatter_only": [False, True], }, self._test_gradient_accumulation, )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardGradientAccumulation(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
set_backward_flags
def set_backward_flags(_model: nn.Module, is_last_microbatch: bool): if mode == "all": set_grad_sync_flag(_model, is_last_microbatch) if not reshard_after_backward: _model.set_reshard_after_backward(is_last_microbatch) elif mode == "some_mlps": for mlp in model[1 : 1 + num_mlps_to_disable_reduce_scatter]: set_grad_sync_flag(mlp, is_last_microbatch) if not reshard_after_backward: mlp.set_reshard_after_backward(is_last_microbatch) elif mode == "root_only": set_grad_sync_flag(model, is_last_microbatch, recurse=False) if not reshard_after_backward: model.set_reshard_after_backward(is_last_microbatch, recurse=False) torch.manual_seed(42 + self.rank + 1) for iter_idx in range(5): comm_count_list = [] for microbatch_idx in range(num_microbatches): is_last_microbatch = microbatch_idx == num_microbatches - 1 set_backward_flags(model, is_last_microbatch) inp = torch.randn(batch_size, lin_dim, device="cuda") losses: List[torch.Tensor] = [] for _model in (ref_model, model): with CommDebugMode() as comm_mode: losses.append(_model(inp).sum()) losses[-1].backward() comm_count_list.append(comm_mode.get_comm_counts()) self.assertEqual(losses[0], losses[1]) comm_counts = defaultdict(int) for comm_count_dict in comm_count_list: for collective, count in comm_count_dict.items(): comm_counts[collective] += count all_gather_count = comm_counts[c10d_ops._allgather_base_] reduce_scatter_count = comm_counts[c10d_ops._reduce_scatter_base_] all_reduce_count = comm_counts[c10d_ops.allreduce_] # Expect one reduce-scatter per MLP plus one for the root's linear # on the last microbatch expected_reduce_scatter_count = num_mlps + 1 if mode == "some_mlps": # Expect additional reduce-scatters for non-disabled MLPs and # the root's linear expected_reduce_scatter_count += ( num_mlps - num_mlps_to_disable_reduce_scatter + 1 ) * (num_microbatches - 1) elif mode == "root_only": # Expect additional reduce-scatters for all MLPs expected_reduce_scatter_count += (num_mlps) * (num_microbatches - 1) expected_all_reduce_count = ( expected_reduce_scatter_count if mesh.ndim == 2 else 0 ) if reduce_scatter_only: # Specially for HSDP if only reduce-scattering but not # all-reducing until the last microbatch, expect one # reduce-scatter per MLP plus for the root per microbatch expected_reduce_scatter_count = (num_mlps + 1) * num_microbatches self.assertEqual(reduce_scatter_count, expected_reduce_scatter_count) self.assertEqual(all_reduce_count, expected_all_reduce_count) # Expect one all-gather per MLP plus one for the root's linear in # the first microbatch's forward expected_all_gather_count = num_mlps + 1 if reshard_after_forward is not False: # `True` or `2` # Add the number of MLPs without the +1 for the backward # all-gathers since the root does not reshard after forward expected_all_gather_count += num_mlps # Multiply by the number of microbatches since these # all-gathers run every microbatch expected_all_gather_count *= num_microbatches elif reshard_after_backward: # `reshard_after_forward=False` expected_all_gather_count *= num_microbatches elif mode == "all": # `reshard_after_forward/backward=False` # Only reshard parameters after the last microbatch's backward, # so there should not be any more all-gathers pass elif mode == "root_only": # `reshard_after_forward/backward=False` # The MLPs should still contribute all-gathers in each # microbatch forward expected_all_gather_count += num_mlps * (num_microbatches - 1) self.assertEqual(all_gather_count, expected_all_gather_count) for param in ref_model.parameters(): if param.grad is not None: dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) check_sharded_parity(self, ref_model, model) for _optim in (optim, ref_optim): _optim.step() # When `set_to_none=False`, we are exercising mixing # gradient accumulation with and without communication _optim.zero_grad(set_to_none=(iter_idx % 2))
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_1f1b_microbatching
def test_1f1b_microbatching(self): self.run_subtests( { "use_explicit_unshard": [False, True], "reshard_after_backward": [False, True], }, self._test_1f1b_microbatching, )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardGradientAccumulation(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_2d_mlp_with_nd_mesh
def test_2d_mlp_with_nd_mesh(self): global_mesh = self.init_global_mesh() self.run_subtests( { "reshard_after_forward": [False, True], "use_activation_checkpointing": [False, True], # TODO: change "mlp_dim" back to [3, 16, 17] when uneven sharding # is supported for FSDP+TP "mlp_dim": [4, 16, 20], "foreach": [False], }, functools.partial(self._test_2d_mlp_with_nd_mesh, global_mesh), )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardNDTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_3d_mlp_with_nd_mesh
def test_3d_mlp_with_nd_mesh(self): global_mesh = self.init_global_mesh() self.run_subtests( { "reshard_after_forward": [False, True], "use_activation_checkpointing": [False, True], # TODO: change "mlp_dim" back to [3, 16, 17] when uneven sharding # is supported for FSDP+TP "mlp_dim": [4, 16, 20], "foreach": [False], }, functools.partial(self._test_3d_mlp_with_nd_mesh, global_mesh), )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardHSDP3DTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_train_parity_hsdp
def test_train_parity_hsdp(self): shard_size = 2 if self.world_size > 2 else 1 replicate_size = self.world_size // shard_size global_mesh = init_device_mesh( "cuda", (replicate_size, shard_size), mesh_dim_names=("replicate", "shard") ) self.run_subtests( { "reshard_after_forward": [False, True], "use_activation_checkpointing": [False, True], "mlp_dim": [3, 16, 17], "sync_gradients_at_last_batch": [True, False], }, functools.partial(self._test_train_parity_hsdp, global_mesh), )
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardHSDPTraining(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fsdp/test_fully_shard_training.py
test_register_fsdp_forward_method
def test_register_fsdp_forward_method(self): """Based on https://github.com/pytorch/pytorch/issues/109385""" class VisionTransformer(nn.Module): def __init__(self) -> None: super().__init__() self.patch_proj = nn.Conv2d(3, 1024, kernel_size=14, stride=14) def forward_features(self, imgs: torch.Tensor) -> torch.Tensor: return self.patch_proj(imgs).flatten(2).transpose(1, 2) def forward(self, imgs: torch.Tensor) -> torch.Tensor: return self.forward_features(imgs).sum(dim=1) class Model(nn.Module): def __init__(self) -> None: super().__init__() self.vit, self.projector = VisionTransformer(), nn.Linear(1024, 256) def forward(self, imgs: torch.Tensor) -> torch.Tensor: # Run `vit.forward_features`, which is not `forward`! patch_embeddings = self.vit.forward_features(imgs) return self.projector(patch_embeddings) torch.manual_seed(42) model = Model() ref_model = copy.deepcopy(model).cuda() fully_shard(model.vit) fully_shard(model.projector) fully_shard(model) register_fsdp_forward_method(model.vit, "forward_features") torch.manual_seed(42 + self.rank + 1) inp = torch.randn(4, 3, 224, 224, device="cuda") ref_loss = ref_model(inp).sum() loss = model(inp).sum() self.assertEqual(ref_loss, loss) ref_loss.backward() loss.backward() for param in ref_model.parameters(): dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) check_sharded_parity(self, ref_model, model)
import contextlib import copy import functools import itertools import unittest from collections import defaultdict from typing import Iterable, List, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, replicate from torch.distributed._composable.fsdp import ( CPUOffloadPolicy, FSDPModule, fully_shard, OffloadPolicy, register_fsdp_forward_method, ) from torch.distributed._tensor import DTensor, init_device_mesh from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, apply_activation_checkpointing, ) from torch.distributed.device_mesh import DeviceMesh from torch.distributed.tensor.debug import CommDebugMode from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( check_sharded_parity, FSDPTest, FSDPTestMultiThread, MLP, MLPStack, patch_all_gather, patch_reduce_scatter, test_compiled_fsdp, ) from torch.testing._internal.common_utils import ( get_cycles_per_ms, run_tests, wrapSwapTensorsTest, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( ModelArgs, Transformer, TransformerBlock, ) c10d_ops = torch.ops.c10d funcol = torch.ops.c10d_functional class TestFullyShardCustomForwardMethod(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fully_shard/test_fully_shard_compile.py
test_compile
def test_compile(self): self.run_subtests( { "sharding_strategy": [ ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "skip_fsdp_guards": [True, False], "act_checkpoint": [True, False], }, self._test_compile, )
import copy import sys import unittest import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._composable import checkpoint, fully_shard from torch.distributed.fsdp import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN from torch.utils._triton import has_triton class TestCompile(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fully_shard/test_fully_shard_init.py
test_policy
def test_policy(self): """Tests passing a ``policy`` for pseudo-auto-wrapping.""" self.run_subtests( { "policy": [ None, ModuleWrapPolicy({UnitModule}), ModuleWrapPolicy({nn.Sequential}), ], }, self._test_policy, )
def test_policy(self): """Tests passing a ``policy`` for pseudo-auto-wrapping.""" def lambda_fn(module: nn.Module): if isinstance(module, nn.Sequential): return True elif isinstance(module, FakeSequential): return {"backward_prefetch": BackwardPrefetch.BACKWARD_POST} return False self.run_subtests( { "policy": [ None, ModuleWrapPolicy({UnitModule}), ModuleWrapPolicy({nn.Sequential}), CustomPolicy(lambda_fn), ], }, self._test_policy, )
import copy import sys from typing import Optional import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed._composable import fully_shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name from torch.distributed.fsdp.wrap import _FSDPPolicy, ModuleWrapPolicy from torch.testing._internal.common_dist_composable import ( CompositeParamModel, NestedSequentialModel, UnitModule, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestInitialization(FSDPTest):
import copy import sys from typing import Optional import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed._composable import fully_shard from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name from torch.distributed.fsdp.wrap import _Policy, CustomPolicy, ModuleWrapPolicy from torch.testing._internal.common_dist_composable import ( CompositeParamModel, FakeSequential, NestedSequentialModel, UnitModule, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestInitialization(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/_composable/fully_shard/test_fully_shard_init.py
lambda_fn
self.run_subtests( { "policy": [ None, ModuleWrapPolicy({UnitModule}), ModuleWrapPolicy({nn.Sequential}), ], }, self._test_policy, )
def lambda_fn(module: nn.Module): if isinstance(module, nn.Sequential): return True elif isinstance(module, FakeSequential): return {"backward_prefetch": BackwardPrefetch.BACKWARD_POST} return False self.run_subtests( { "policy": [ None, ModuleWrapPolicy({UnitModule}), ModuleWrapPolicy({nn.Sequential}), CustomPolicy(lambda_fn), ], }, self._test_policy, )
import copy import sys from typing import Optional import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed._composable import fully_shard from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name from torch.distributed.fsdp.wrap import _Policy, CustomPolicy, ModuleWrapPolicy from torch.testing._internal.common_dist_composable import ( CompositeParamModel, FakeSequential, NestedSequentialModel, UnitModule, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/_composable/fully_shard/test_fully_shard_init.py
test_nested_fully_shard_shared_state
def test_nested_fully_shard_shared_state(self): """ Tests that nested applications of ``fully_shard`` share the expected data structure state. """ device = torch.device("cuda") composable_module = CompositeParamModel(device=device) fully_shard(composable_module.u1) fully_shard(composable_module.u2) fully_shard(composable_module) # Run a forward pass to trigger lazy initialization inp = torch.randn((2, 100), device=device) composable_module(inp) # Check that all modules with `fully_shard` applied share the same data # structure state for the structures with the given names (there is no # need to check all of them to verify that the sharing worked). # NOTE: This check only requires that the data structure state is # shared. Namely, sharing the FSDP state object itself is sufficient # but not necessary. data_structure_names = ["_streams", "_exec_order_data", "_free_event_queue"] for data_structure_name in data_structure_names: all_structures = set() for module in ( composable_module.u1, composable_module.u2, composable_module, ): all_structures.add( id(getattr(fully_shard.state(module), data_structure_name)) ) self.assertEqual(len(all_structures), 1)
def test_nested_fully_shard_shared_state(self): """ Tests that nested applications of ``fully_shard`` share the expected data structure state. """ self.run_subtests( {"use_policy": [False, True]}, self._test_nested_fully_shard_shared_state, )
import copy import sys from typing import Optional import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed._composable import fully_shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name from torch.distributed.fsdp.wrap import _FSDPPolicy, ModuleWrapPolicy from torch.testing._internal.common_dist_composable import ( CompositeParamModel, NestedSequentialModel, UnitModule, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestInitialization(FSDPTest):
import copy import sys from typing import Optional import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed._composable import fully_shard from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _is_fsdp_flattened, clean_tensor_name from torch.distributed.fsdp.wrap import _Policy, CustomPolicy, ModuleWrapPolicy from torch.testing._internal.common_dist_composable import ( CompositeParamModel, FakeSequential, NestedSequentialModel, UnitModule, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestInitialization(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified