ZTWHHH committed
Commit 421767e · verified · 1 Parent(s): 1d36377

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. .gitattributes +2 -0
  2. infer_4_30_0/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa2_C.abi3.so +3 -0
  3. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py +4 -0
  4. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc +0 -0
  5. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc +0 -0
  6. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc +0 -0
  7. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc +0 -0
  8. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc +0 -0
  9. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py +126 -0
  10. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/contract.py +224 -0
  11. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py +2 -0
  12. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
  13. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc +0 -0
  14. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc +0 -0
  15. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc +0 -0
  16. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc +0 -0
  17. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc +0 -0
  18. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc +0 -0
  19. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc +0 -0
  20. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc +0 -0
  21. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py +80 -0
  22. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py +477 -0
  23. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py +152 -0
  24. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py +168 -0
  25. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py +754 -0
  26. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py +614 -0
  27. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py +383 -0
  28. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py +446 -0
  29. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py +131 -0
  30. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py +256 -0
  31. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/fsdp2_mem_tracker.cpython-310.pyc +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_tools/mem_tracker.py +943 -0
  33. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc +0 -0
  34. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py +0 -0
  35. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py +2091 -0
  36. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py +1638 -0
  37. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py +137 -0
  38. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py +924 -0
  39. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py +238 -0
  40. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py +113 -0
  41. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py +336 -0
  42. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/api.py +410 -0
  43. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py +0 -0
  44. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py +396 -0
  45. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py +608 -0
  46. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc +0 -0
  47. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc +0 -0
  48. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc +0 -0
  49. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py +20 -0
  50. infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -875,3 +875,5 @@ infer_4_47_1/lib/python3.10/site-packages/sympy/core/tests/__pycache__/test_args
  infer_4_47_1/lib/python3.10/site-packages/sympy/stats/__pycache__/crv_types.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  infer_4_47_1/lib/python3.10/site-packages/sympy/core/__pycache__/function.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  infer_4_47_1/lib/python3.10/site-packages/sympy/core/__pycache__/expr.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ infer_4_30_0/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa2_C.abi3.so filter=lfs diff=lfs merge=lfs -text
infer_4_30_0/lib/python3.10/site-packages/vllm/vllm_flash_attn/_vllm_fa2_C.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c666388d3fc323a1e8ec1a89543d391a531cc405dc112ea1c66c372b620664ea
+ size 220831720
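The shared object itself lives in Git LFS storage, so the commit only adds this three-line pointer: the LFS spec version, the SHA-256 object id, and the byte size (220831720 bytes, roughly 220 MB). As a hedged illustration only (not part of the commit; the path in the usage comment is a placeholder, and this assumes the pointer has not been smudged into the real binary), such a pointer can be read like this:

# Sketch: parse the key/value lines of a Git LFS pointer file.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")  # e.g. "size 220831720"
        fields[key] = value
    return fields

# Usage (placeholder path):
# info = parse_lfs_pointer("infer_4_30_0/.../_vllm_fa2_C.abi3.so")
# print(info["oid"], int(info["size"]))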
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .checkpoint_activation import checkpoint
+ from .contract import _get_registry, contract
+ from .fully_shard import fully_shard
+ from .replicate import replicate
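This package init re-exports the composable APIs added below: checkpoint (activation checkpointing), contract/_get_registry, fully_shard, and replicate. A hedged sketch only, mirroring the example in checkpoint's own docstring; fully_shard and replicate additionally need an initialized process group and are omitted here:

import torch
import torch.nn as nn
from torch.distributed._composable import checkpoint

class Toy(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.l1 = nn.Linear(10, 10)
        self.l2 = nn.Linear(10, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.l2(self.l1(x))

model = Toy()
checkpoint(model.l1)  # recompute only l1's activations during backward
model(torch.zeros(2, 10)).sum().backward()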
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (364 Bytes).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/checkpoint_activation.cpython-310.pyc ADDED
Binary file (4.25 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/contract.cpython-310.pyc ADDED
Binary file (6.13 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/fully_shard.cpython-310.pyc ADDED
Binary file (3.88 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/__pycache__/replicate.cpython-310.pyc ADDED
Binary file (7.2 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/checkpoint_activation.py ADDED
@@ -0,0 +1,126 @@
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ from contextlib import contextmanager, nullcontext
4
+ from typing import Any, ContextManager, Dict, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from torch.utils.checkpoint import (
9
+ _checkpoint_without_reentrant_generator,
10
+ _DEFAULT_DETERMINISM_MODE,
11
+ )
12
+
13
+ from .contract import contract
14
+
15
+
16
+ @contextmanager
17
+ def _no_hook(module: nn.Module, user_ctx: Optional[ContextManager] = None):
18
+ r"""
19
+ Disable hooks installed by checkpoint to avoid unintentional recursion
20
+ during backward recomputation.
21
+ """
22
+
23
+ with user_ctx if user_ctx else nullcontext():
24
+ orig_enable_hook = checkpoint.state(module).enable_hook
25
+ checkpoint.state(module).enable_hook = False
26
+ try:
27
+ yield
28
+ finally:
29
+ checkpoint.state(module).enable_hook = orig_enable_hook
30
+
31
+
32
+ @contract()
33
+ def checkpoint(module: nn.Module, **kwargs) -> nn.Module:
34
+ r"""
35
+ This is a composable activation checkpointing API. Unlike functional
36
+ activation checkpointing APIs, this one does not require changing model
37
+ source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs,
38
+ this one does not modify model structure or fully-qualified names either.
39
+ Under the hood, it registers activation checkpointing logic as pre- and
40
+ post-forward hooks. Hence, this API can be easily applied to any model or
41
+ sub-modules in the model.
42
+
43
+ Args:
44
+ module (nn.Module): the target model or sub-module to apply activation
45
+ checkpointing.
46
+
47
+ Example::
48
+ >>> # xdoctest: +SKIP
49
+ >>> import torch.nn as nn
50
+ >>>
51
+ >>> class MyModel(nn.Module):
52
+ >>> def __init__(self) -> None:
53
+ >>> super().__init__()
54
+ >>> self.l1 = nn.Linear(10, 10)
55
+ >>> self.l2 = nn.Linear(10, 10)
56
+ >>>
57
+ >>> def forward(self, x):
58
+ >>> return self.l2(self.l1(x))
59
+ >>>
60
+ >>> model = MyModel()
61
+ >>> checkpoint(model.l1) # apply activation checkpointing only to l1
62
+ >>> model(torch.zeros(2, 10)).sum().backward()
63
+
64
+ """
65
+ torch._C._log_api_usage_once("torch.distributed.checkpoint")
66
+
67
+ use_reentrant = kwargs.pop("use_reentrant", False)
68
+ if use_reentrant:
69
+ raise NotImplementedError(
70
+ "use_reentrant=True is not supported in composable checkpoint. "
71
+ "Please use torch.utils.checkpoint.checkpoint instead."
72
+ )
73
+ preserve_rng_state = kwargs.pop("preserve_rng_state", True)
74
+ user_context_fns = kwargs.pop("context_fn", None)
75
+ determinism_check = kwargs.pop("determinism_check", _DEFAULT_DETERMINISM_MODE)
76
+ debug = kwargs.pop("debug", False)
77
+
78
+ if kwargs:
79
+ raise ValueError(
80
+ "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
81
+ )
82
+
83
+ def forward_pre_hook(
84
+ module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
85
+ ) -> None:
86
+ if checkpoint.state(module).enable_hook:
87
+
88
+ def context_fns():
89
+ if user_context_fns is not None:
90
+ ctx1, ctx2 = user_context_fns()
91
+ return ctx1, _no_hook(module, ctx2)
92
+ else:
93
+ return nullcontext(), _no_hook(module)
94
+
95
+ checkpoint.state(
96
+ module
97
+ )._ac_generator = _checkpoint_without_reentrant_generator(
98
+ module,
99
+ preserve_rng_state,
100
+ context_fns,
101
+ determinism_check,
102
+ debug,
103
+ *args,
104
+ **kwargs,
105
+ )
106
+ next(checkpoint.state(module)._ac_generator)
107
+
108
+ def forward_hook(module: nn.Module, inputs: Tuple[Any, ...], output: Any) -> Any:
109
+ if checkpoint.state(module).enable_hook:
110
+ try:
111
+ next(checkpoint.state(module)._ac_generator)
112
+ except StopIteration:
113
+ pass
114
+ else:
115
+ raise RuntimeError(
116
+ "Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!"
117
+ )
118
+
119
+ # Ensure that we no longer hold on to the generator. always_call=True helps ensure we
120
+ # clear this even in the case of exception in fwd pass.
121
+ checkpoint.state(module)._ac_generator = None
122
+
123
+ checkpoint.state(module).enable_hook = True
124
+ module.register_forward_pre_hook(forward_pre_hook, with_kwargs=True)
125
+ module.register_forward_hook(forward_hook, prepend=True, always_call=True)
126
+ return module
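Beyond the documented positional usage, the implementation above accepts a small set of keyword arguments (preserve_rng_state, context_fn, determinism_check, debug), explicitly rejects use_reentrant=True, and raises on anything unrecognized. A hedged sketch of that keyword handling (illustrative only; fresh modules are used because each composable API may be applied to a given module only once):

import torch.nn as nn
from torch.distributed._composable import checkpoint

checkpoint(nn.Linear(8, 8), preserve_rng_state=False)  # accepted and popped

try:
    checkpoint(nn.Linear(8, 8), use_reentrant=True)
except NotImplementedError:
    pass  # reentrant checkpointing is unsupported in the composable API

try:
    checkpoint(nn.Linear(8, 8), not_an_option=1)
except ValueError:
    pass  # leftover keyword arguments raise ValueError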
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/contract.py ADDED
@@ -0,0 +1,224 @@
1
+ # mypy: allow-untyped-defs
2
+ import uuid
3
+ from collections import OrderedDict
4
+ from functools import wraps
5
+ from typing import Callable, Dict, List, Optional, Sequence, Type, Union
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torch.distributed._composable_state import _State
10
+ from torch.distributed.utils import _get_root_modules
11
+
12
+
13
+ def generate_state_key(string="__composable_api_state_key"):
14
+ return f"{string}_{str(uuid.uuid4())}"
15
+
16
+
17
+ STATE_KEY = generate_state_key()
18
+ REGISTRY_KEY = generate_state_key()
19
+
20
+
21
+ # TODO: we can add additional info to RegistryItem to share across APIs. E.g.,
22
+ # we can add args and kwargs here, and then we can detect whether fully_shard
23
+ # is combined with reentrant activation checkpointing and error out with a clear
24
+ # message.
25
+ class RegistryItem:
26
+ pass
27
+
28
+
29
+ def contract(state_cls: Type[_State] = _State):
30
+ r"""
31
+ Decorate a function as a composable distributed API, where the first
32
+ argument of the function must be an :class:`nn.Module` instance or sequence
33
+ of :class:`nn.Module` instances.
34
+
35
+ The decorator verifies that the decorated function does not modify
36
+ fully-qualified names (FQNs) for parameters, buffers, or modules. The
37
+ decorated function can return different module instances than the input
38
+ modules; the FQN invariant will be enforced following the input order.
39
+
40
+ When a function ``func`` is decorated by ``@contract()``, a
41
+ ``.state(module: nn.Module)`` method will be installed to the decorated
42
+ function. Then you can retrieve and modify the state on a module by calling
43
+ ``func.state(module)``.
44
+
45
+ Example::
46
+ >>> # xdoctest: +SKIP
47
+ >>> import torch.nn as nn
48
+ >>>
49
+ >>> class MyModel(nn.Module):
50
+ >>> def __init__(self) -> None:
51
+ >>> super().__init__()
52
+ >>> self.l1 = nn.Linear(10, 10)
53
+ >>> self.l2 = nn.Linear(10, 10)
54
+ >>>
55
+ >>> def forward(self, x):
56
+ >>> return self.l2(self.l1(x))
57
+ >>>
58
+ >>> @contract()
59
+ >>> def my_feature(module: nn.Module) -> nn.Module:
60
+ >>> my_feature.state(module).some_state = "any value"
61
+ >>> return module
62
+ >>>
63
+ >>> model = MyModel()
64
+ >>> my_feature(model.l1)
65
+ >>> assert my_feature.state(model.l1).some_state == "any value"
66
+ >>> my_feature(model.l2)
67
+ >>> model(torch.randn(2, 10)).sum().backward()
68
+ """
69
+
70
+ # wraps will make functions decorated with contract() pickleable - needed for integration with torch.package
71
+ @wraps(state_cls)
72
+ def inner(func):
73
+ @wraps(func)
74
+ def wrapper(
75
+ module: Union[nn.Module, Sequence[nn.Module]], *args, **kwargs
76
+ ) -> Optional[nn.Module]:
77
+ inp_module = module
78
+ if isinstance(module, nn.Module):
79
+ modules = [module]
80
+ else:
81
+ # If the user passes a sequence of modules, then we assume that
82
+ # we only need to insert the state object on the root modules
83
+ # (i.e. those without a parent) among the passed-in modules.
84
+ modules = _get_root_modules(list(module))
85
+ state = state_cls() # shared across all modules
86
+ registry_item = RegistryItem() # shared across all modules
87
+
88
+ # `func` is allowed to return different module instances than the
89
+ # input modules as long as FQNs are preserved following the input
90
+ # module order
91
+ all_orig_named_params: List[Dict[str, nn.Parameter]] = []
92
+ all_orig_named_buffers: List[Dict[str, torch.Tensor]] = []
93
+ all_orig_named_modules: List[Dict[str, nn.Module]] = []
94
+
95
+ for module in modules:
96
+ default_all_state: Dict[Callable, _State] = OrderedDict()
97
+ default_registry: Dict[str, RegistryItem] = OrderedDict()
98
+ all_state: Dict[Callable, _State] = module.__dict__.setdefault( # type: ignore[call-overload]
99
+ STATE_KEY, default_all_state
100
+ )
101
+ if not isinstance(all_state, dict):
102
+ raise AssertionError(
103
+ f"Distributed composable API states corrupted: {all_state}"
104
+ )
105
+ registry: Dict[str, RegistryItem] = module.__dict__.setdefault( # type: ignore[call-overload]
106
+ REGISTRY_KEY, default_registry
107
+ )
108
+ if not isinstance(registry, dict):
109
+ raise AssertionError(
110
+ f"Distributed composable API registry corrupted: {registry}"
111
+ )
112
+ if func in all_state or func.__name__ in registry:
113
+ raise AssertionError(
114
+ "Each distinct composable distributed API can only be applied to a "
115
+ f"module once. {func.__name__} has already been applied to the "
116
+ f"following module:\n{module}"
117
+ )
118
+ all_state.setdefault(func, state)
119
+ registry.setdefault(func.__name__, registry_item)
120
+
121
+ all_orig_named_params.append(OrderedDict(module.named_parameters()))
122
+ all_orig_named_buffers.append(OrderedDict(module.named_buffers()))
123
+ all_orig_named_modules.append(OrderedDict(module.named_modules()))
124
+
125
+ updated = func(inp_module, *args, **kwargs)
126
+ if updated is None:
127
+ updated = inp_module
128
+ if isinstance(updated, nn.Module):
129
+ updated_modules = [updated]
130
+ else:
131
+ updated_modules = _get_root_modules(list(inp_module))
132
+
133
+ all_new_named_params: List[Dict[str, nn.Parameter]] = []
134
+ all_new_named_buffers: List[Dict[str, torch.Tensor]] = []
135
+ all_new_named_modules: List[Dict[str, nn.Module]] = []
136
+ for module in updated_modules:
137
+ all_new_named_params.append(OrderedDict(module.named_parameters()))
138
+ all_new_named_buffers.append(OrderedDict(module.named_buffers()))
139
+ all_new_named_modules.append(OrderedDict(module.named_modules()))
140
+
141
+ num_orig_modules = len(all_orig_named_modules)
142
+ num_new_modules = len(all_new_named_modules)
143
+ if num_orig_modules != num_new_modules:
144
+ raise AssertionError(
145
+ f"{func.__name__} should return the same number of modules as input modules"
146
+ f"Inputs: {num_orig_modules} modules\n"
147
+ f"Outputs: {num_new_modules} modules"
148
+ )
149
+
150
+ def check_fqn(orig_fqns: List[str], new_fqns: List[str], check_key: str):
151
+ if orig_fqns == new_fqns:
152
+ return
153
+
154
+ orig_fqn_set, new_fqn_set = set(orig_fqns), set(new_fqns)
155
+ orig_only = orig_fqn_set - new_fqn_set
156
+ new_only = new_fqn_set - orig_fqn_set
157
+ if len(orig_only) or len(new_only):
158
+ raise RuntimeError(
159
+ f"{check_key}"
160
+ "Composable distributed API implementations cannot modify FQNs.\n"
161
+ f"FQNs only in original: {orig_only}\n"
162
+ f"FQNs only in new: {new_only}"
163
+ )
164
+ else:
165
+ raise RuntimeError(
166
+ f"{check_key}"
167
+ "Composable distributed API implementations cannot modify "
168
+ "the order of FQNs.\n"
169
+ f"Original FQNs: {orig_only}\n"
170
+ f"New FQNs: {new_only}"
171
+ )
172
+
173
+ for orig_named_params, new_named_params in zip(
174
+ all_orig_named_params, all_new_named_params
175
+ ):
176
+ check_fqn(
177
+ list(orig_named_params.keys()),
178
+ list(new_named_params.keys()),
179
+ "Checking parameters: ",
180
+ )
181
+ for orig_named_buffers, new_named_buffers in zip(
182
+ all_orig_named_buffers, all_new_named_buffers
183
+ ):
184
+ check_fqn(
185
+ list(orig_named_buffers.keys()),
186
+ list(new_named_buffers.keys()),
187
+ "Checking buffers: ",
188
+ )
189
+ for orig_named_modules, new_named_modules in zip(
190
+ all_orig_named_modules, all_new_named_modules
191
+ ):
192
+ check_fqn(
193
+ list(orig_named_modules.keys()),
194
+ list(new_named_modules.keys()),
195
+ "Checking modules: ",
196
+ )
197
+
198
+ # TODO: verify that installed distributed paradigms are compatible with
199
+ # each other.
200
+
201
+ return updated
202
+
203
+ def get_state(module: nn.Module) -> Optional[_State]:
204
+ return module.__dict__.setdefault( # type: ignore[call-overload]
205
+ STATE_KEY,
206
+ {}, # TODO(@yhcharles): this is a temporary fix, need a better way
207
+ ).get(
208
+ func
209
+ ) # type: ignore[call-overload]
210
+
211
+ wrapper.state = get_state # type: ignore[attr-defined]
212
+
213
+ return wrapper
214
+
215
+ return inner
216
+
217
+
218
+ def _get_registry(module: nn.Module) -> Optional[Dict[str, RegistryItem]]:
219
+ r"""
220
+ Get an ``OrderedDict`` of composable APIs that have been applied to the
221
+ ``module``, indexed by the API name. If no API has been applied, then this
222
+ returns ``None``.
223
+ """
224
+ return getattr(module, REGISTRY_KEY, None)
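_get_registry returns the per-module OrderedDict of composable APIs that have been applied, keyed by the API's function name, or None if nothing has been applied. A hedged sketch of inspecting that registry (illustrative only):

import torch.nn as nn
from torch.distributed._composable import _get_registry, checkpoint

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
assert _get_registry(model[0]) is None  # nothing applied yet

checkpoint(model[0])
registry = _get_registry(model[0])
assert registry is not None and "checkpoint" in registry  # keyed by API name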
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
+ from .fully_shard import FSDPModule, fully_shard, register_fsdp_forward_method
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (391 Bytes).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc ADDED
Binary file (4.05 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc ADDED
Binary file (5.16 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc ADDED
Binary file (5.18 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc ADDED
Binary file (20.9 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param_group.cpython-310.pyc ADDED
Binary file (18.5 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc ADDED
Binary file (18.4 kB).
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_api.py ADDED
@@ -0,0 +1,80 @@
1
+ # mypy: allow-untyped-defs
2
+ from dataclasses import dataclass
3
+ from typing import Optional
4
+
5
+ import torch
6
+
7
+
8
+ @dataclass(frozen=True)
9
+ class MixedPrecisionPolicy:
10
+ """
11
+ This configures FSDP's mixed precision. Unlike autocast, this applies mixed
12
+ precision at the module level, not op level, which means low-precision
13
+ activations are saved for backward and high-to-low-precision casts are
14
+ incurred only at module boundaries.
15
+
16
+ FSDP works well with module-level mixed precision since it keeps the
17
+ high-precision sharded parameters in memory anyway. In other words, FSDP
18
+ does not require any extra memory to keep a high-precision copy of the
19
+ parameters for the optimizer step.
20
+
21
+ Attributes:
22
+ param_dtype (Optional[torch.dtype]): This specifies the dtype for
23
+ the unsharded parameter and hence the dtype for forward/backward
24
+ computation and the parameter all-gather. If this is ``None``, then
25
+ the unsharded parameter uses the original dtype. The optimizer step
26
+ uses the sharded parameter in the original dtype. (Default:
27
+ ``None``)
28
+ reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
29
+ gradient reduction (i.e. reduce-scatter or all-reduce). If this is
30
+ ``None`` but ``param_dtype`` is not ``None``, then the reduction
31
+ uses the compute dtype. This can be used to run gradient reduction
32
+ in full precision while using low precision for compute. If also
33
+ gradient reduction is disabled via :meth:`set_requires_gradient_sync`,
34
+ then FSDP will accumulate gradients using ``reduce_dtype``.
35
+ (Default: ``None``)
36
+ output_dtype (Optional[torch.dtype]): This specifies the dtype for
37
+ casting floating-point forward outputs. This can be used to
38
+ help implement cases where different modules have different mixed
39
+ precision policies. (Default: ``None``)
40
+ cast_forward_inputs (bool): This specifies whether FSDP should cast the
41
+ forward's floating-point input tensors to ``param_dtype`` or not.
42
+ """
43
+
44
+ param_dtype: Optional[torch.dtype] = None
45
+ reduce_dtype: Optional[torch.dtype] = None
46
+ output_dtype: Optional[torch.dtype] = None
47
+ cast_forward_inputs: bool = True
48
+
49
+ def __post_init__(self):
50
+ # Clamp `reduce_dtype` to `None` if no casting is required: since
51
+ # gradients are computed in `param_dtype`, if `reduce_dtype` matches,
52
+ # then we do not need extra casting
53
+ if self.param_dtype == self.reduce_dtype:
54
+ # Bypass the frozen dataclass checks
55
+ object.__setattr__(self, "reduce_dtype", None)
56
+
57
+
58
+ @dataclass
59
+ class OffloadPolicy:
60
+ """This base class represents the policy of no offloading."""
61
+
62
+
63
+ @dataclass
64
+ class CPUOffloadPolicy(OffloadPolicy):
65
+ """
66
+ This offload policy offloads parameters, gradients, and optimizer states to
67
+ CPU. Sharded parameters are copied host-to-device before all-gather. The
68
+ all-gathered parameters are freed according to ``reshard_after_forward``.
69
+ Sharded gradients are copied device-to-host in backward, and the optimizer
70
+ step runs on CPU with CPU optimizer states.
71
+
72
+ Attributes:
73
+ pin_memory (bool): Whether to pin sharded parameter and gradient
74
+ memory. Pinning memory allows H2D/D2H copying without blocking the
75
+ CPU and in turn, overlap with compute, but pinned memory cannot be
76
+ used by other processes. Set this to ``False`` if you have
77
+ insufficient CPU memory. (Default: ``True``)
78
+ """
79
+
80
+ pin_memory: bool = True
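One detail worth calling out from __post_init__ above: when reduce_dtype equals param_dtype, it is clamped to None, since gradients are already produced in param_dtype and no extra cast is needed around the reduction. A hedged sketch (illustrative only; these policies are ultimately consumed by fully_shard, whose signature lives in fully_shard.py and is not shown in this view):

import torch
from torch.distributed._composable.fsdp import CPUOffloadPolicy, MixedPrecisionPolicy

# bf16 compute with fp32 gradient reduction: both dtypes are kept.
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)
assert mp.reduce_dtype is torch.float32

# A matching reduce_dtype is clamped to None (no extra cast needed).
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16)
assert mp.reduce_dtype is None

offload = CPUOffloadPolicy(pin_memory=False)  # for hosts with limited CPU memory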
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_collectives.py ADDED
@@ -0,0 +1,477 @@
1
+ # mypy: allow-untyped-decorators
2
+ from typing import cast, List, NamedTuple, Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch._dynamo.compiled_autograd as ca
6
+ import torch.distributed as dist
7
+ from torch.distributed.distributed_c10d import ReduceOp
8
+ from torch.distributed.tensor import DTensor
9
+
10
+ from ._fsdp_common import (
11
+ _get_dim0_padded_size,
12
+ _raise_assert_with_print,
13
+ _to_dtype_if_needed,
14
+ )
15
+ from ._fsdp_param import FSDPParam, ShardedState
16
+
17
+
18
+ class AllGatherResult(NamedTuple):
19
+ all_gather_output: torch.Tensor
20
+ all_gather_event: Optional[torch.cuda.Event]
21
+ all_gather_work: Optional[dist.distributed_c10d.Work]
22
+ # For each parameter, the all-gather input dtype for each input
23
+ param_all_gather_input_dtypes: List[List[torch.dtype]]
24
+ # For each parameter, the all-gather input numel for each input
25
+ param_all_gather_input_numels: List[List[int]]
26
+ # 1D flattened version of `param_all_gather_input_numels` saved to avoid
27
+ # CPU overhead from recomputing
28
+ all_gather_input_split_sizes: List[int]
29
+
30
+
31
+ lib = torch.library.Library("fsdp", "FRAGMENT") # noqa: TOR901
32
+
33
+ lib.define(
34
+ """
35
+ all_gather_copy_in(
36
+ Tensor[] all_gather_inputs,
37
+ SymInt[] inp_split_sizes,
38
+ SymInt all_gather_input_numel,
39
+ SymInt world_size,
40
+ SymInt rank,
41
+ ScalarType dtype,
42
+ Device device
43
+ ) -> (Tensor, Tensor)
44
+ """
45
+ )
46
+
47
+
48
+ @torch.library.impl(lib, "all_gather_copy_in", "Meta")
49
+ def all_gather_copy_in_meta(
50
+ all_gather_inputs: List[torch.Tensor],
51
+ inp_split_sizes: List[int],
52
+ all_gather_input_numel: int,
53
+ world_size: int,
54
+ rank: int,
55
+ dtype: torch.dtype,
56
+ device: torch.device,
57
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
58
+ all_gather_output = torch.empty(
59
+ (all_gather_input_numel * world_size,), dtype=dtype, device="meta"
60
+ )
61
+ all_gather_input = all_gather_output.narrow(
62
+ 0, all_gather_input_numel * rank, all_gather_input_numel
63
+ )
64
+ return all_gather_input, all_gather_output
65
+
66
+
67
+ @torch.library.impl(lib, "all_gather_copy_in", "CUDA")
68
+ @torch.library.impl(lib, "all_gather_copy_in", "CPU")
69
+ def all_gather_copy_in_cuda(
70
+ all_gather_inputs: List[torch.Tensor],
71
+ inp_split_sizes: List[int],
72
+ all_gather_input_numel: int,
73
+ world_size: int,
74
+ rank: int,
75
+ dtype: torch.dtype,
76
+ device: torch.device,
77
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
78
+ all_gather_output = torch.empty(
79
+ (all_gather_input_numel * world_size,), dtype=dtype, device=device
80
+ )
81
+ all_gather_input = all_gather_output.narrow(
82
+ 0, all_gather_input_numel * rank, all_gather_input_numel
83
+ )
84
+ foreach_copy_dsts = torch.split(all_gather_input, inp_split_sizes)
85
+ with torch.no_grad():
86
+ torch._foreach_copy_(foreach_copy_dsts, all_gather_inputs)
87
+ return all_gather_input, all_gather_output
88
+
89
+
90
+ lib.define(
91
+ "split_with_sizes_copy(Tensor all_gather_output, SymInt[] all_gather_input_split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()"
92
+ )
93
+
94
+
95
+ @torch.library.impl(lib, "split_with_sizes_copy", "Meta")
96
+ @torch.library.impl(lib, "split_with_sizes_copy", "CUDA")
97
+ @torch.library.impl(lib, "split_with_sizes_copy", "CPU")
98
+ def split_with_sizes_copy(
99
+ all_gather_output: torch.Tensor,
100
+ all_gather_input_split_sizes: List[int],
101
+ dim: int,
102
+ out: List[torch.Tensor],
103
+ ) -> None:
104
+ torch.split_with_sizes_copy(
105
+ all_gather_output, all_gather_input_split_sizes, dim=dim, out=out
106
+ )
107
+
108
+
109
+ lib.define(
110
+ "chunk_cat(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> ()"
111
+ )
112
+
113
+
114
+ @torch.library.impl(lib, "chunk_cat", "Meta")
115
+ @torch.library.impl(lib, "chunk_cat", "CUDA")
116
+ @torch.library.impl(lib, "chunk_cat", "CPU")
117
+ def chunk_cat(
118
+ tensors: List[torch.Tensor],
119
+ dim: int,
120
+ num_chunks: int,
121
+ out: torch.Tensor,
122
+ ) -> None:
123
+ torch._chunk_cat(tensors, dim, num_chunks, out=out)
124
+
125
+
126
+ @torch.no_grad()
127
+ def foreach_all_gather(
128
+ fsdp_params: List[FSDPParam],
129
+ group: dist.ProcessGroup,
130
+ async_op: bool,
131
+ all_gather_copy_in_stream: torch.cuda.Stream,
132
+ all_gather_stream: torch.cuda.Stream,
133
+ device: torch.device,
134
+ ) -> Optional[AllGatherResult]:
135
+ world_size, rank = group.size(), group.rank()
136
+ with torch.cuda.stream(all_gather_copy_in_stream):
137
+ param_all_gather_inputs = _get_param_all_gather_inputs(fsdp_params)
138
+ (
139
+ param_all_gather_input_dtypes,
140
+ param_all_gather_input_numels,
141
+ dtype,
142
+ ) = _get_all_gather_input_metadatas(param_all_gather_inputs)
143
+ if dtype == torch.uint8:
144
+ all_gather_inputs = [
145
+ t.view(torch.uint8) for ts in param_all_gather_inputs for t in ts
146
+ ]
147
+ else:
148
+ all_gather_inputs = [t for ts in param_all_gather_inputs for t in ts]
149
+ inp_split_sizes = [t.numel() for t in all_gather_inputs]
150
+ all_gather_input_numel = sum(inp_split_sizes)
151
+ all_gather_input, all_gather_output = torch.ops.fsdp.all_gather_copy_in(
152
+ all_gather_inputs,
153
+ inp_split_sizes,
154
+ all_gather_input_numel,
155
+ world_size,
156
+ rank,
157
+ dtype,
158
+ device,
159
+ )
160
+ del param_all_gather_inputs
161
+ all_gather_stream.wait_stream(all_gather_copy_in_stream)
162
+ with torch.cuda.stream(all_gather_stream):
163
+ all_gather_work = dist.all_gather_into_tensor(
164
+ output_tensor=all_gather_output,
165
+ input_tensor=all_gather_input,
166
+ group=group,
167
+ async_op=async_op,
168
+ )
169
+ all_gather_event = all_gather_stream.record_event()
170
+ return AllGatherResult(
171
+ all_gather_output,
172
+ all_gather_event,
173
+ all_gather_work,
174
+ param_all_gather_input_dtypes,
175
+ param_all_gather_input_numels,
176
+ inp_split_sizes,
177
+ )
178
+
179
+
180
+ @torch.no_grad()
181
+ def _get_param_all_gather_inputs(
182
+ fsdp_params: List[FSDPParam],
183
+ ) -> List[List[torch.Tensor]]:
184
+ if ca.compiled_autograd_enabled:
185
+ return [fsdp_param.all_gather_inputs for fsdp_param in fsdp_params]
186
+
187
+ # Intentionally try to run a fast-path that bypasses abstractions for the
188
+ # common FSDP case of bf16/fp32 mixed precision in order to use foreach
189
+ # copy for lower CPU overhead and more efficient copying in eager
190
+ def use_foreach_copy(fsdp_param: FSDPParam) -> bool:
191
+ return (
192
+ fsdp_param.param_dtype is not None
193
+ and not fsdp_param.offload_to_cpu
194
+ and not hasattr(fsdp_param._sharded_local_tensor, "fsdp_pre_all_gather")
195
+ )
196
+
197
+ param_all_gather_inputs: List[List[torch.Tensor]] = [[] for _ in fsdp_params]
198
+ foreach_copy_indices: List[int] = []
199
+ foreach_copy_inputs: List[torch.Tensor] = []
200
+ foreach_copy_input_numels: List[int] = []
201
+
202
+ # 1st pass: for foreach-copy parameters, get inputs and metadata for the
203
+ # foreach copy, and for the others, actually get their all-gather inputs
204
+ for i, fsdp_param in enumerate(fsdp_params):
205
+ if use_foreach_copy(fsdp_param):
206
+ foreach_copy_indices.append(i)
207
+ all_gather_input = (
208
+ fsdp_param._sharded_param_data
209
+ if fsdp_param.sharded_state == ShardedState.SHARDED
210
+ else cast(torch.Tensor, fsdp_param._sharded_post_forward_param_data)
211
+ )
212
+ foreach_copy_inputs.append(all_gather_input)
213
+ foreach_copy_input_numels.append(all_gather_input.numel())
214
+ else:
215
+ param_all_gather_inputs[i] = fsdp_param.all_gather_inputs
216
+
217
+ # 2nd pass: use foreach copy to compute the remaining all-gather inputs
218
+ if foreach_copy_inputs:
219
+ fsdp_param_0 = fsdp_params[foreach_copy_indices[0]]
220
+ param_dtype, device = fsdp_param_0.param_dtype, fsdp_param_0.device
221
+ flat_foreach_copy_input = torch.empty(
222
+ (sum(foreach_copy_input_numels),), device=device, dtype=param_dtype
223
+ )
224
+ splits = torch.split(flat_foreach_copy_input, foreach_copy_input_numels)
225
+ torch._foreach_copy_(splits, foreach_copy_inputs)
226
+ for i, split in zip(foreach_copy_indices, splits):
227
+ param_all_gather_inputs[i] = [split]
228
+
229
+ return param_all_gather_inputs
230
+
231
+
232
+ @torch.no_grad()
233
+ def foreach_all_gather_copy_out(
234
+ all_gather_result: AllGatherResult,
235
+ fsdp_params: List[FSDPParam],
236
+ group: dist.ProcessGroup,
237
+ ) -> None:
238
+ (
239
+ all_gather_output,
240
+ all_gather_event,
241
+ all_gather_work,
242
+ param_all_gather_input_dtypes,
243
+ param_all_gather_input_numels,
244
+ all_gather_input_split_sizes,
245
+ ) = all_gather_result
246
+ if all_gather_event is not None: # sync op
247
+ torch.cuda.current_stream().wait_event(all_gather_event)
248
+ if isinstance(all_gather_work, dist.distributed_c10d.Work): # async op
249
+ all_gather_work.wait()
250
+ world_size, device = group.size(), all_gather_output.device
251
+ for all_gather_input_numels, all_gather_input_dtypes, fsdp_param in zip(
252
+ param_all_gather_input_numels, param_all_gather_input_dtypes, fsdp_params
253
+ ):
254
+ if ca.compiled_autograd_enabled:
255
+ fsdp_param.init_all_gather_outputs(
256
+ all_gather_input_numels,
257
+ all_gather_input_dtypes,
258
+ world_size,
259
+ device,
260
+ # NOTE: Under compile, make sure we always recreate all_gather_outputs
261
+ # per AllGather. See [Note: Invariants for torch.compile Traceable FSDP2].
262
+ force_recreate=True,
263
+ )
264
+ else:
265
+ fsdp_param.init_all_gather_outputs(
266
+ all_gather_input_numels, all_gather_input_dtypes, world_size, device
267
+ ) # no-op after 1st call
268
+ fsdp_param.alloc_all_gather_outputs()
269
+ all_gather_output = all_gather_output.view(world_size, -1)
270
+ gen = (t for fsdp_param in fsdp_params for t in fsdp_param.all_gather_outputs)
271
+ if all_gather_output.dtype == torch.uint8:
272
+ out = [t.view(world_size, -1).view(torch.uint8) for t in gen]
273
+ else:
274
+ out = [t.view(world_size, -1) for t in gen]
275
+ torch.ops.fsdp.split_with_sizes_copy(
276
+ all_gather_output, all_gather_input_split_sizes, dim=1, out=out
277
+ )
278
+
279
+
280
+ @torch.no_grad()
281
+ def foreach_reduce(
282
+ fsdp_params: List[FSDPParam],
283
+ unsharded_grads: List[torch.Tensor],
284
+ reduce_scatter_group: dist.ProcessGroup,
285
+ reduce_scatter_stream: torch.cuda.Stream,
286
+ orig_dtype: torch.dtype,
287
+ reduce_dtype: Optional[torch.dtype],
288
+ device: torch.device,
289
+ reduce_scatter_reduce_op: Optional[Union[dist.ReduceOp, dist.ReduceOp.RedOpType]],
290
+ all_reduce_group: Optional[dist.ProcessGroup], # not `None` iff HSDP
291
+ all_reduce_stream: torch.cuda.Stream,
292
+ all_reduce_grads: bool,
293
+ partial_reduce_output: Optional[torch.Tensor], # only used for HSDP
294
+ ) -> Tuple[torch.Tensor, torch.cuda.Event, torch.cuda.Event, Optional[torch.Tensor]]:
295
+ """
296
+ ``unsharded_grads`` owns the references to the gradients computed by
297
+ autograd, so clearing the list frees the gradients.
298
+ """
299
+ grad_dtypes = {grad.dtype for grad in unsharded_grads}
300
+ if len(grad_dtypes) != 1:
301
+ # Check this at runtime since it could be a real runtime error if e.g.
302
+ # fp8 weights do not produce the correct higher precision gradients
303
+ _raise_assert_with_print(
304
+ f"FSDP reduce-scatter expects uniform gradient dtype but got {grad_dtypes}"
305
+ )
306
+ grad_dtype = unsharded_grads[0].dtype
307
+ reduce_dtype = reduce_dtype or grad_dtype
308
+ predivide_factor, postdivide_factor = _get_gradient_divide_factors(
309
+ reduce_scatter_group, all_reduce_group, reduce_dtype
310
+ )
311
+ world_size = reduce_scatter_group.size()
312
+ padded_unsharded_sizes = tuple(
313
+ _get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads
314
+ )
315
+ reduce_scatter_input_numel = sum(s.numel() for s in padded_unsharded_sizes)
316
+ reduce_scatter_output_numel = reduce_scatter_input_numel // world_size
317
+ reduce_scatter_input = torch.empty(
318
+ (reduce_scatter_input_numel,), dtype=reduce_dtype, device=device
319
+ )
320
+ foreach_reduce_scatter_copy_in(unsharded_grads, reduce_scatter_input, world_size)
321
+ current_stream = torch.cuda.current_stream()
322
+ # Only after the copy-in finishes can we free the gradients
323
+ unsharded_grads.clear()
324
+ reduce_scatter_stream.wait_stream(current_stream)
325
+ with torch.cuda.stream(reduce_scatter_stream):
326
+ reduce_output = reduce_scatter_input.new_empty((reduce_scatter_output_numel,))
327
+ _div_if_needed(reduce_scatter_input, predivide_factor)
328
+ if reduce_scatter_reduce_op is None:
329
+ if predivide_factor is None:
330
+ reduce_scatter_reduce_op = ReduceOp.AVG
331
+ else:
332
+ reduce_scatter_reduce_op = ReduceOp.SUM
333
+ dist.reduce_scatter_tensor(
334
+ output=reduce_output,
335
+ input=reduce_scatter_input,
336
+ group=reduce_scatter_group,
337
+ op=reduce_scatter_reduce_op,
338
+ )
339
+ reduce_scatter_event = reduce_scatter_stream.record_event()
340
+ post_reduce_stream = reduce_scatter_stream
341
+ if all_reduce_group is not None: # HSDP
342
+ # Accumulations must run in the reduce-scatter stream
343
+ if not all_reduce_grads:
344
+ if partial_reduce_output is not None:
345
+ partial_reduce_output += reduce_output
346
+ else:
347
+ partial_reduce_output = reduce_output
348
+ return (
349
+ reduce_scatter_input,
350
+ reduce_scatter_event,
351
+ post_reduce_stream.record_event(),
352
+ partial_reduce_output,
353
+ )
354
+ if partial_reduce_output is not None:
355
+ reduce_output += partial_reduce_output
356
+ post_reduce_stream = all_reduce_stream
357
+ all_reduce_stream.wait_stream(reduce_scatter_stream)
358
+ with torch.cuda.stream(all_reduce_stream):
359
+ dist.all_reduce(
360
+ reduce_output,
361
+ group=all_reduce_group,
362
+ op=ReduceOp.AVG if predivide_factor is None else ReduceOp.SUM,
363
+ )
364
+ with torch.cuda.stream(post_reduce_stream):
365
+ _div_if_needed(reduce_output, postdivide_factor)
366
+ reduce_output = _to_dtype_if_needed(reduce_output, orig_dtype)
367
+ # View out and accumulate sharded gradients
368
+ flat_grad_offset = 0 # [0, reduce_scatter_output_numel - 1]
369
+ for padded_unsharded_size, fsdp_param in zip(
370
+ padded_unsharded_sizes, fsdp_params
371
+ ):
372
+ new_sharded_grad = torch.as_strided(
373
+ reduce_output,
374
+ size=fsdp_param.sharded_size,
375
+ stride=fsdp_param.contiguous_sharded_stride,
376
+ storage_offset=flat_grad_offset,
377
+ )
378
+ to_accumulate_grad = fsdp_param.sharded_param.grad is not None
379
+ if fsdp_param.offload_to_cpu:
380
+ # Only overlap the D2H copy (copying to pinned memory) if not
381
+ # accumulating gradients since the CPU add kernel depends on
382
+ # the copy result and we cannot run the add as a callback
383
+ non_blocking = fsdp_param.pin_memory and not to_accumulate_grad
384
+ # Since the GPU sharded gradient is allocated in the RS stream,
385
+ # we can free it here by not keeping a ref without waiting for
386
+ # the D2H copy since future RS-stream ops run after the copy
387
+ new_sharded_grad = new_sharded_grad.to(
388
+ torch.device("cpu"), non_blocking=non_blocking
389
+ )
390
+ if non_blocking:
391
+ # Record an event on which to block the CPU thread to
392
+ # ensure that the D2H copy finishes before the optimizer
393
+ fsdp_param.grad_offload_event = reduce_scatter_stream.record_event()
394
+ if to_accumulate_grad:
395
+ assert isinstance(fsdp_param.sharded_param.grad, DTensor)
396
+ fsdp_param.sharded_param.grad._local_tensor += new_sharded_grad
397
+ else:
398
+ new_sharded_dtensor_grad = fsdp_param.to_sharded_dtensor(
399
+ new_sharded_grad
400
+ )
401
+ fsdp_param.sharded_param.grad = new_sharded_dtensor_grad
402
+ if not ca.compiled_autograd_enabled:
403
+ for hook in (
404
+ getattr(fsdp_param.sharded_param, "_post_accumulate_grad_hooks", {})
405
+ or {}
406
+ ).values():
407
+ hook(fsdp_param.sharded_param)
408
+ padded_sharded_numel = padded_unsharded_size.numel() // world_size
409
+ flat_grad_offset += padded_sharded_numel
410
+ post_reduce_event = post_reduce_stream.record_event()
411
+ # The RS output is allocated in the RS stream and used in the default
412
+ # stream (for optimizer). To ensure its memory is not reused for later
413
+ # RSs, we do not need extra synchronization since the sharded parameters
414
+ # hold refs through the end of backward.
415
+ return reduce_scatter_input, reduce_scatter_event, post_reduce_event, None
416
+
417
+
418
+ def foreach_reduce_scatter_copy_in(
419
+ unsharded_grads: List[torch.Tensor],
420
+ reduce_scatter_input: torch.Tensor,
421
+ world_size: int,
422
+ ) -> None:
423
+ reduce_scatter_input = reduce_scatter_input.view(world_size, -1)
424
+ torch.ops.fsdp.chunk_cat(
425
+ unsharded_grads, dim=0, num_chunks=world_size, out=reduce_scatter_input
426
+ )
427
+
428
+
429
+ def _get_all_gather_input_metadatas(
430
+ param_all_gather_inputs: List[List[torch.Tensor]],
431
+ ) -> Tuple[List[List[torch.dtype]], List[List[int]], torch.dtype]:
432
+ param_all_gather_input_dtypes: List[List[torch.dtype]] = []
433
+ param_all_gather_input_numels: List[List[int]] = []
434
+ all_gather_dtype = param_all_gather_inputs[0][0].dtype
435
+ for all_gather_inputs in param_all_gather_inputs:
436
+ input_dtypes: List[torch.dtype] = []
437
+ input_numels: List[int] = []
438
+ for all_gather_input in all_gather_inputs:
439
+ if all_gather_input.dtype != all_gather_dtype:
440
+ all_gather_dtype = torch.uint8
441
+ input_dtypes.append(all_gather_input.dtype)
442
+ input_numels.append(all_gather_input.numel())
443
+ param_all_gather_input_dtypes.append(input_dtypes)
444
+ param_all_gather_input_numels.append(input_numels)
445
+ return (
446
+ param_all_gather_input_dtypes,
447
+ param_all_gather_input_numels,
448
+ all_gather_dtype,
449
+ )
450
+
451
+
452
+ def _get_gradient_divide_factors(
453
+ reduce_scatter_group: dist.ProcessGroup,
454
+ all_reduce_group: Optional[dist.ProcessGroup],
455
+ reduce_dtype: torch.dtype,
456
+ ) -> Union[Tuple[None, None], Tuple[float, float]]:
457
+ # For fp32/bf16, we do not need to worry about overflow/underflow, so we
458
+ # use NCCL's built-in division to avoid separate div kernels
459
+ if reduce_dtype in (torch.float32, torch.bfloat16):
460
+ return None, None
461
+ data_parallel_size = reduce_scatter_group.size()
462
+ if all_reduce_group is not None:
463
+ data_parallel_size *= all_reduce_group.size()
464
+ # Since fp16 has smaller dynamic range than fp32/bf16, we want to avoid
465
+ # overflow/underflow. For N data parallel workers, each worker computes
466
+ # g_i, and they collectively reduce (g_1 + ... + g_N) / N. To avoid
467
+ # overflow/underflow, we divide by ~sqrt(N) before/after the reduction.
468
+ factor: int = 1
469
+ while data_parallel_size % factor == 0 and data_parallel_size / factor > factor:
470
+ factor *= 2
471
+ factor = float(factor)
472
+ return (factor, data_parallel_size / factor)
473
+
474
+
475
+ def _div_if_needed(tensor: torch.Tensor, div_factor: Optional[float]) -> None:
476
+ if div_factor is not None and div_factor > 1:
477
+ tensor.div_(div_factor)
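For fp32/bf16 reductions, _get_gradient_divide_factors above returns (None, None) and the division by world size is folded into ReduceOp.AVG; for fp16 it splits the division into a pre-division and a post-division of roughly sqrt(N) each, keeping intermediate values inside fp16's dynamic range. A hedged, standalone reproduction of just that factor computation (illustrative only):

# Sketch: pre-/post-division factors for an fp16 gradient reduction over N ranks.
def divide_factors(data_parallel_size: int) -> tuple:
    factor = 1
    while data_parallel_size % factor == 0 and data_parallel_size / factor > factor:
        factor *= 2
    return float(factor), data_parallel_size / factor

assert divide_factors(64) == (8.0, 8.0)  # divide by 8 before and by 8 after
assert divide_factors(32) == (8.0, 4.0)  # 32 split as 8 * 4
assert divide_factors(8) == (4.0, 2.0)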
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_common.py ADDED
@@ -0,0 +1,152 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ import traceback
4
+ from dataclasses import dataclass
5
+ from enum import auto, Enum
6
+ from typing import Any, cast, List, Optional
7
+
8
+ import torch
9
+ import torch._dynamo.compiled_autograd as ca
10
+ import torch.distributed as dist
11
+ import torch.nn as nn
12
+ from torch.distributed._composable.contract import _get_registry
13
+ from torch.distributed.tensor import DeviceMesh, DTensor
14
+ from torch.distributed.tensor._dtensor_spec import DTensorSpec
15
+
16
+
17
+ @dataclass
18
+ class DataParallelMeshInfo:
19
+ mesh: DeviceMesh
20
+ shard_mesh_dim: Optional[int] = None
21
+ replicate_mesh_dim: Optional[int] = None
22
+
23
+ def __post_init__(self):
24
+ if self.shard_mesh_dim is None and self.replicate_mesh_dim is None:
25
+ raise AssertionError(
26
+ "At least one of shard_mesh_dim and replicate_mesh_dim must not be None"
27
+ )
28
+
29
+
30
+ @dataclass
31
+ class FSDPMeshInfo(DataParallelMeshInfo):
32
+ def __post_init__(self):
33
+ super().__post_init__()
34
+ if self.shard_mesh_dim is None:
35
+ raise AssertionError("Expects non-None shard_mesh_dim")
36
+ self.shard_mesh_size: int = self.mesh.size(self.shard_mesh_dim)
37
+ self.shard_process_group = self.mesh.get_group(self.shard_mesh_dim)
38
+ self.shard_mesh_rank: int = self.shard_process_group.rank()
39
+
40
+
41
+ @dataclass
42
+ class DDPMeshInfo(DataParallelMeshInfo):
43
+ def __post_init__(self):
44
+ super().__post_init__()
45
+ if self.replicate_mesh_dim is None:
46
+ raise AssertionError("Expects non-None replicate_mesh_dim")
47
+ self.replicate_mesh_size: int = self.mesh.size(self.replicate_mesh_dim)
48
+ self.replicate_process_group = self.mesh.get_group(self.replicate_mesh_dim)
49
+ self.replicate_mesh_rank: int = self.replicate_process_group.rank()
50
+
51
+
52
+ @dataclass
53
+ class HSDPMeshInfo(FSDPMeshInfo, DDPMeshInfo):
54
+ def __post_init__(self):
55
+ # Calls `FSDPMeshInfo` -> `DDPMeshInfo` -> `DataParallelMeshInfo`
56
+ super().__post_init__()
57
+
58
+
59
+ class TrainingState(Enum):
60
+ """Describes the training state of one FSDP state / parameter group."""
61
+
62
+ # Transition to forward starting pre-forward until post-forward
63
+ FORWARD = auto()
64
+ # Transition to pre-backward when unsharding in backward
65
+ PRE_BACKWARD = auto()
66
+ # Transition to post-backward when resharding and reducing gradients
67
+ POST_BACKWARD = auto()
68
+ # Idle before/after forward or before pre-backward/after post-backward
69
+ IDLE = auto()
70
+
71
+
72
+ def _raise_assert_with_print(*args: Any, **kwargs: Any):
73
+ print(f"[Rank {dist.get_rank()}] ", end="")
74
+ print(*args, **kwargs)
75
+ traceback.print_stack()
76
+ raise AssertionError(*args, **kwargs)
77
+
78
+
79
+ def _is_composable_with_fsdp(module: nn.Module) -> bool:
80
+ registry = _get_registry(module)
81
+ if registry is None:
82
+ return True
83
+ # Registry keys by function name
84
+ return "replicate" not in registry
85
+
86
+
87
+ def _get_dim0_padded_size(tensor_size: torch.Size, dim0_factor: int) -> torch.Size:
88
+ padded_dim0 = math.ceil(tensor_size[0] / dim0_factor) * dim0_factor
89
+ return cast(torch.Size, torch.Size([padded_dim0]) + tensor_size[1:])
90
+
91
+
92
+ def _chunk_with_empty(
93
+ tensor: torch.Tensor, num_chunks: int, dim: int
94
+ ) -> List[torch.Tensor]:
95
+ chunks = list(torch.chunk(tensor, num_chunks, dim=dim))
96
+ while len(chunks) < num_chunks:
97
+ chunks.append(chunks[0].new_empty(0))
98
+ return chunks
99
+
100
+
101
+ def _get_dim0_chunked_size(
102
+ chunk: torch.Tensor, unchunked_size: torch.Size
103
+ ) -> torch.Size:
104
+ if chunk.numel() > 0:
105
+ return chunk.size()
106
+ # For 0 numel, we need to preserve trailing dims for DTensor APIs
107
+ return cast(torch.Size, torch.Size([0]) + unchunked_size[1:])
108
+
109
+
110
+ def _from_local_no_grad(
111
+ local_tensor: torch.Tensor,
112
+ sharding_spec: DTensorSpec,
113
+ ) -> DTensor:
114
+ """
115
+ This method is similar to ``DTensor.from_local()`` except that in eager mode
116
+ it avoids some CPU overhead by avoiding default args and not being differentiable.
117
+ """
118
+
119
+ if not ca.compiled_autograd_enabled:
120
+ return DTensor(
121
+ # Use the local tensor directly instead of constructing a new tensor
122
+ # variable, e.g. with `view_as()`, since this is not differentiable
123
+ local_tensor,
124
+ sharding_spec,
125
+ requires_grad=local_tensor.requires_grad,
126
+ )
127
+ else:
128
+ return DTensor.from_local(
129
+ local_tensor,
130
+ sharding_spec.mesh,
131
+ sharding_spec.placements,
132
+ shape=sharding_spec.shape,
133
+ stride=sharding_spec.stride,
134
+ )
135
+
136
+
137
+ def _to_dtype_if_needed(
138
+ tensor: torch.Tensor, dtype: Optional[torch.dtype]
139
+ ) -> torch.Tensor:
140
+ if dtype is not None and tensor.dtype != dtype:
141
+ return tensor.to(dtype)
142
+ return tensor
143
+
144
+
145
+ def _cast_fp_tensor(dtype: torch.dtype, x: torch.Tensor) -> torch.Tensor:
146
+ if (
147
+ not isinstance(x, torch.Tensor)
148
+ or not torch.is_floating_point(x)
149
+ or x.dtype == dtype
150
+ ):
151
+ return x
152
+ return x.to(dtype)
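The dim-0 helpers above pad a parameter's leading dimension up to a multiple of the shard-group size and pad torch.chunk's output with 0-numel chunks so every rank owns a (possibly empty) shard. A hedged worked example (illustrative only, reproducing the padding arithmetic standalone):

import math
import torch

def dim0_padded_size(size: torch.Size, factor: int) -> torch.Size:
    return torch.Size([math.ceil(size[0] / factor) * factor]) + size[1:]

weight = torch.randn(10, 4)
assert dim0_padded_size(weight.size(), 4) == torch.Size([12, 4])  # 10 -> 12 rows

# torch.chunk can return fewer chunks than ranks; _chunk_with_empty pads the
# list with empty tensors so the chunk count always equals the group size.
chunks = list(torch.chunk(torch.randn(2, 4), 4, dim=0))
assert len(chunks) == 2  # only 2 non-empty chunks exist for 4 ranks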
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_init.py ADDED
@@ -0,0 +1,168 @@
1
+ import itertools
2
+ from typing import List, Optional, Set, Tuple, Union
3
+
4
+ import torch
5
+ import torch.distributed as dist
6
+ import torch.nn as nn
7
+ from torch.distributed.device_mesh import _get_device_handle
8
+ from torch.distributed.tensor import DeviceMesh, DTensor, init_device_mesh
9
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
10
+
11
+ from ._fsdp_common import _is_composable_with_fsdp, FSDPMeshInfo, HSDPMeshInfo
12
+ from ._fsdp_state import _get_module_fsdp_state
13
+
14
+
15
+ def _get_post_forward_mesh_info(
16
+ reshard_after_forward: Union[bool, int], mesh_info: FSDPMeshInfo
17
+ ) -> Optional[FSDPMeshInfo]:
18
+ shard_mesh_size = mesh_info.shard_mesh_size
19
+ if not isinstance(reshard_after_forward, (bool, int)):
20
+ raise ValueError(
21
+ "reshard_after_forward should be a bool or an int representing the "
22
+ f"group size to reshard to, not {reshard_after_forward}"
23
+ )
24
+ # NOTE: `isinstance(False, int)` returns `True`.
25
+ if not isinstance(reshard_after_forward, bool) and isinstance(
26
+ reshard_after_forward, int
27
+ ):
28
+ if (
29
+ reshard_after_forward < 1
30
+ or reshard_after_forward > shard_mesh_size
31
+ or shard_mesh_size % reshard_after_forward != 0
32
+ ):
33
+ raise ValueError(
34
+ "If passing reshard_after_forward as an int, it should be a "
35
+ f"factor of {shard_mesh_size}, not {reshard_after_forward}"
36
+ )
37
+ elif reshard_after_forward == 1:
38
+ reshard_after_forward = False
39
+ elif reshard_after_forward == shard_mesh_size:
40
+ reshard_after_forward = True
41
+ post_forward_mesh_info = None
42
+ if reshard_after_forward is True:
43
+ post_forward_mesh_info = mesh_info
44
+ elif reshard_after_forward is not False: # int case
45
+ # For HSDP, we can flatten the two replicate dims into the 0th dim
46
+ post_forward_mesh_tensor = mesh_info.mesh.mesh.view(-1, reshard_after_forward)
47
+ post_forward_mesh = DeviceMesh(
48
+ mesh_info.mesh.device_type, post_forward_mesh_tensor
49
+ )
50
+ post_forward_mesh_info = HSDPMeshInfo(
51
+ post_forward_mesh, shard_mesh_dim=1, replicate_mesh_dim=0
52
+ )
53
+ return post_forward_mesh_info
54
+
55
+
56
+ def _init_default_fully_shard_mesh() -> DeviceMesh:
57
+ """Default to global CUDA mesh if possible else global CPU mesh."""
58
+ if not dist.distributed_c10d.is_initialized():
59
+ dist.distributed_c10d.init_process_group()
60
+ default_pg = dist.distributed_c10d._get_default_group()
61
+ device_type = "cuda" if torch.cuda.is_available() else "cpu"
62
+ mesh = init_device_mesh(device_type, mesh_shape=(default_pg.size(),))
63
+ return mesh
64
+
65
+
66
+ def _get_device_from_mesh(mesh: DeviceMesh) -> torch.device:
67
+ if mesh.device_type == "cpu":
68
+ return torch.device("cpu")
69
+ device_handle = _get_device_handle(mesh.device_type)
70
+ return torch.device(mesh.device_type, device_handle.current_device())
71
+
72
+
73
+ def _get_managed_modules(root_modules: Tuple[nn.Module, ...]) -> List[nn.Module]:
74
+ modules: List[nn.Module] = []
75
+ root_modules_set = set(root_modules)
76
+ # Track visisted modules to avoid visiting shared modules multiple times
77
+ visited_modules: Set[nn.Module] = set()
78
+
79
+ def dfs(module: nn.Module) -> None:
80
+ """
81
+ Runs a DFS to collect managed modules, not recursing into modules with
82
+ a non-composable API or ``fully_shard`` already applied.
83
+ """
84
+ if not _is_composable_with_fsdp(module):
85
+ return
86
+ elif (
87
+ module not in root_modules_set
88
+ and _get_module_fsdp_state(module) is not None
89
+ ):
90
+ return # nested `fully_shard` module
91
+ visited_modules.add(module)
92
+ for submodule in module.children():
93
+ if submodule not in visited_modules:
94
+ dfs(submodule)
95
+ modules.append(module)
96
+
97
+ for root_module in root_modules:
98
+ dfs(root_module)
99
+ return modules
100
+
101
+
102
+ def _verify_managed_param(name: str, param: nn.Parameter) -> None:
103
+ """
104
+ Verify if the parameter is accepted by fully_shard. The only restriction now
105
+ is that the parameter cannot be a scalar (i.e. 0-dim) tensor since we
106
+ need at least one dim to shard.
107
+ """
108
+ if len(param.shape) == 0:
109
+ raise ValueError(
110
+ "fully_shard doesn't support scalar parameters. "
111
+ f"Change {name} to a 1D tensor with numel equal to 1."
112
+ )
113
+
114
+
115
+ def _get_managed_states(
116
+ modules: List[nn.Module],
117
+ ) -> Tuple[List[nn.Parameter], List[torch.Tensor]]:
118
+ params: List[nn.Parameter] = []
119
+ buffers: List[torch.Tensor] = []
120
+ # Track visited parameters/buffers to avoid visiting shared parameters and
121
+ # buffers multiple times
122
+ visited_params: Set[nn.Parameter] = set()
123
+ visited_buffers: Set[torch.Tensor] = set()
124
+ for module in modules:
125
+ for name, param in module.named_parameters(recurse=False):
126
+ if param not in visited_params:
127
+ _verify_managed_param(name, param)
128
+ params.append(param)
129
+ visited_params.add(param)
130
+ for buffer in module.buffers(recurse=False):
131
+ if buffer not in visited_buffers:
132
+ buffers.append(buffer)
133
+ visited_buffers.add(buffer)
134
+ return params, buffers
135
+
136
+
137
+ def _move_states_to_device(
138
+ params: List[nn.Parameter],
139
+ buffers: List[torch.Tensor],
140
+ device: torch.device,
141
+ ) -> None:
142
+ """
143
+ We have FSDP move states to device for simpler and faster initialization
144
+ since FSDP almost always uses CUDA for training. We move parameters/buffers
145
+ rather than modules so that we can support ignoring parameters/buffers in
146
+ the future.
147
+ """
148
+ # Follow the logic in `nn.Module._apply`
149
+ for tensor in itertools.chain(params, buffers):
150
+ if tensor.device == device or tensor.device.type == "meta":
151
+ # Keep meta-device tensors on meta device for deferred init
152
+ continue
153
+ if isinstance(tensor, DTensor):
154
+ if (dtensor_mesh_type := tensor.device_mesh.device_type) != device.type:
155
+ raise ValueError(
156
+ "Requires DTensor to have mesh of the same type as the FSDP mesh "
157
+ f"but got {dtensor_mesh_type} for DTensor and {device.type} for FSDP"
158
+ )
159
+ raise AssertionError(
160
+ f"Expects DTensor to be moved to {dtensor_mesh_type} but got {tensor.device}"
161
+ )
162
+ tensor_ = tensor
163
+ if is_traceable_wrapper_subclass(tensor_):
164
+ with torch.no_grad(): # avoid autograd increasing C++ refcount by 1
165
+ tensor_on_device = nn.Parameter(tensor.to(device))
166
+ torch.utils.swap_tensors(tensor, tensor_on_device)
167
+ else:
168
+ tensor.data = tensor.to(device)
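Editorial aside on `_get_post_forward_mesh_info` above: the accepted values of `reshard_after_forward` are easiest to see from the normalization step alone. The sketch below is standalone and not part of the diffed file; `normalize_reshard_after_forward` and the example `shard_mesh_size=8` are hypothetical, and the real helper additionally builds the post-forward DeviceMesh.

def normalize_reshard_after_forward(value, shard_mesh_size=8):
    # Mirrors the validation above: a bool is accepted as-is; an int must be a
    # factor of the shard mesh size in [1, shard_mesh_size].
    if not isinstance(value, (bool, int)):
        raise ValueError(f"expected bool or int, got {value!r}")
    if not isinstance(value, bool):  # note: isinstance(False, int) is True
        if value < 1 or value > shard_mesh_size or shard_mesh_size % value != 0:
            raise ValueError(f"{value} is not a factor of {shard_mesh_size}")
        if value == 1:
            return False  # do not reshard after forward (keep unsharded)
        if value == shard_mesh_size:
            return True  # reshard back over the full shard mesh
    return value  # True, False, or an intermediate factor such as 2 or 4

assert normalize_reshard_after_forward(1) is False
assert normalize_reshard_after_forward(8) is True
assert normalize_reshard_after_forward(4) == 4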
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param.py ADDED
@@ -0,0 +1,754 @@
1
+ # mypy: allow-untyped-defs
2
+ import itertools
3
+ from dataclasses import dataclass, field
4
+ from enum import auto, Enum
5
+ from typing import Any, cast, List, Optional, Sequence, Tuple
6
+
7
+ import torch
8
+ import torch._dynamo.compiled_autograd as ca
9
+ import torch.nn as nn
10
+ from torch._prims_common import make_contiguous_strides_for
11
+ from torch.distributed._functional_collectives import AsyncCollectiveTensor
12
+ from torch.distributed.tensor import DTensor, Replicate, Shard
13
+ from torch.distributed.tensor._dtensor_spec import DTensorSpec, TensorMeta
14
+ from torch.distributed.tensor.device_mesh import _mesh_resources
15
+ from torch.distributed.tensor.placement_types import _StridedShard, Placement
16
+
17
+ from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
18
+ from ._fsdp_common import (
19
+ _chunk_with_empty,
20
+ _from_local_no_grad,
21
+ _get_dim0_chunked_size,
22
+ _raise_assert_with_print,
23
+ _to_dtype_if_needed,
24
+ FSDPMeshInfo,
25
+ HSDPMeshInfo,
26
+ )
27
+
28
+
29
+ """
30
+ [Note: FSDP tensors]
31
+ FSDP considers the following tensors:
32
+ - Original parameter: parameter passed to :class:`FSDPParam`, i.e. the one
33
+ on the module when applying FSDP
34
+ - Sharded parameter: sharding the original parameter on dim-0 as a DTensor
35
+ over the main mesh
36
+ - All-gather inputs: the ``torch.Tensor`` or ``Tensor`` s passed to all-gather,
37
+ derived from the sharded parameter
38
+ - All-gather output: the ``torch.Tensor`` or ``Tensor`` s resulting from
39
+ all-gathering the all-gather inputs
40
+ - Unsharded parameter: parameter used for forward/backward computation, derived
41
+ from the all-gather output; autograd leaf
42
+
43
+ We define these tensors to describe the general framework that can accommodate
44
+ extensions, where:
45
+ - all-gather-inputs = pre-all-gather-transform(sharded-parameter)
46
+ - unsharded-parameter = post-all-gather-transform(all-gather-outputs)
47
+
48
+ For the default ``torch.Tensor`` case, there is only one all-gather input, and
49
+ it shares the same underlying tensor data as the sharded parameter, meaning
50
+ that they can be thought of as the same tensors. The same applies for the
51
+ all-gather output and unsharded parameter. For non-``torch.Tensor`` extensions,
52
+ these equivalences may no longer hold due to the pre/post-all-gather
53
+ transforms, and some may have multiple all-gather inputs/outputs (e.g.
54
+ quantized data and scales).
55
+
56
+ [Note: FSDP and autograd]
57
+ FSDP dynamically frees and allocates the unsharded parameter. Since autograd
58
+ can pack a reference to it or a view to save for backward, we use storage
59
+ resizing to implement the freeing/allocation since that preserves the aliasing.
60
+ This implies that we construct the unsharded parameter object once and write to
61
+ it in-place thereafter. For the default ``torch.Tensor`` original parameter
62
+ case, the all-gather output and unsharded parameter share the same
63
+ data, so we use storage resizing on the all-gather output.
64
+ """
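# Editor's sketch (not part of this file): what the pre/post-all-gather
# extension described in the note above might look like on a tensor-like
# class. The hook names and call shapes mirror how this file invokes them
# further below (``fsdp_pre_all_gather(mesh)`` returning
# ``(all_gather_inputs, metadata)`` and
# ``fsdp_post_all_gather(outputs, metadata, param_dtype, *, out=None)``);
# the class itself and its no-op "transform" are hypothetical.
from typing import Any, Optional, Tuple
import torch

class _IdentityAllGatherExtension:
    # A real extension would be a traceable torch.Tensor subclass wrapping
    # e.g. quantized data and scales; this sketch only shows the hook shapes.
    def __init__(self, data: torch.Tensor) -> None:
        self._data = data

    def fsdp_pre_all_gather(self, mesh) -> Tuple[Tuple[torch.Tensor, ...], Any]:
        # Return the flat tensors to all-gather plus opaque metadata that is
        # handed back to fsdp_post_all_gather after the collective.
        return (self._data,), None

    def fsdp_post_all_gather(
        self,
        all_gather_outputs: Tuple[torch.Tensor, ...],
        metadata: Any,
        param_dtype: torch.dtype,
        *,
        out: Optional[torch.Tensor] = None,
    ):
        (output,) = all_gather_outputs
        if out is not None:
            out.copy_(output)  # fill the preallocated unsharded parameter
            return None
        # Otherwise return the unsharded tensor plus the inner tensors whose
        # storage FSDP should free/allocate across unshard/reshard.
        return output.to(param_dtype), (output,)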
65
+
66
+ lib = torch.library.Library("fsdp", "FRAGMENT") # noqa: TOR901
67
+
68
+ lib.define("set_(Tensor(a!) tensor, Tensor data) -> ()")
69
+
70
+
71
+ @torch.library.impl(lib, "set_", "Meta")
72
+ @torch.library.impl(lib, "set_", "CUDA")
73
+ @torch.library.impl(lib, "set_", "CPU")
74
+ def set_(tensor, data):
75
+ tensor.set_(data)
76
+
77
+
78
+ """
79
+ [Note: Avoiding functionalization for fsdp.set_ and inductor.resize_storage_bytes_(0)]
80
+
81
+ Currently we don't functionalize `fsdp.set_` op or `inductor.resize_storage_bytes_(0)` op
82
+ (i.e. they show up as a mutation op in the middle of the AOT joint graph).
83
+
84
+ Reason:
85
+ The Traceable FSDP2 compiled autograd BWD graph has the following traits:
86
+ (1) Two inputs of the graph were aliased to each other (one from hook closed-over tensors, one from FWD saved tensors).
87
+ (2) One of them is mutated (set_ and resize_(0) to handle the all-gathered param).
88
+ (3) They are both subclasses.
89
+ The combination of these traits is not supported by AOTAutograd (it's difficult to reason about subclass aliasing).
90
+ So this doesn't work at all for Traceable FSDP2.
91
+
92
+ The compromise we use is to avoid functionalization for the FSDP2 set_ and resize_(0) ops.
93
+ This avoids the problem above, because from AOTAutograd point-of-view there are no mutations
94
+ that functionalization needs to handle. (Although we need to be careful not to DCE those mutable ops.)
95
+
96
+ We can avoid this functionalization because:
97
+ (1) The nn.Parameter is never used before its .set_() is called in eager code (i.e. no alias of it is created),
98
+ so it's safe to call .set_() in the middle of the graph to swap out its storage and start using the nn.Parameter downstream.
99
+ (2) We always re-allocate the buffer for nn.Parameter to store the AllGather output and to be used in downstream user ops.
100
+ So calling resize-to-0 in the middle of the graph to free nn.Parameter memory after use should always be okay
101
+ (since we always allocate anew next time we need it, we strictly don't need to keep the old tensor storage around anymore).
102
+
103
+ Q: But doesn't the torch.compile stack have the "functional graph" assumption in many places?
104
+ A: Yes - this is WIP but we will try to get back to functional graph as early as possible in the lowering process.
105
+ Specifically, we believe we can move both .set_ and .resize_(0) ops to end of graph in AOT joint graph before partitioner
106
+ (i.e. effectively "re-functionalizing" those ops). Put it in another way, we avoid functionalization for those two ops just to
107
+ make AOTAutograd alias analysis happy, and as soon as we are past that point, we "re-functionalize" the graph.
108
+ This requires a custom FX pass but we believe it's not hard to write and maintain.
109
+
110
+ Q: What's the importance of partitioner not saving views of nn.Parameter as FWD saved tensors?
111
+ A: This is critical: we do want to save FWD nn.Parameter graph input (instead of its view) for BWD use,
112
+ so that downstream ops in the BWD graph use the post-`.set_` nn.Parameter instead of any of its saved views as input.
113
+ This is because .set_ will not update any of the nn.Parameter's views, so BWD downstream ops must use the original
114
+ nn.Parameter in order to see the result of .set_.
115
+ """
116
+
117
+
118
+ @torch.library.impl(lib, "set_", "Functionalize")
119
+ def set__functionalize(tensor, data):
120
+ torch._sync(tensor)
121
+ torch._sync(data)
122
+ # AOTDispatcher needs to know if any inputs had their storages mutated.
123
+ # (Why? It sometimes detaches inputs before sending them into the graph,
124
+ # when it sees that they do not need to have any gradients computed)
125
+ torch._functionalize_set_storage_changed(tensor)
126
+ tensor_inner = torch._from_functional_tensor(tensor)
127
+ data_inner = torch._from_functional_tensor(data)
128
+ with torch._C._ExcludeDispatchKeyGuard(
129
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
130
+ ):
131
+ torch.ops.fsdp.set_.default(tensor_inner, data_inner)
132
+
133
+
134
+ torch.fx.node.has_side_effect(torch.ops.fsdp.set_.default)
135
+
136
+
137
+ class ShardedState(Enum):
138
+ """
139
+ - ``SHARDED``: The sharded parameter is registered to the module. It is the
140
+ only contributor to parameter memory.
141
+ - ``SHARDED_POST_FORWARD``: The unsharded parameter is resharded to a
142
+ smaller world size. Since this data should not be used for computation,
143
+ we do not register it to the module. Users should reshard the module
144
+ before any in-place modifications. Both it and the sharded parameter
145
+ contribute to parameter memory.
146
+ - ``UNSHARDED``: The unsharded parameter is registered to the module. Both
147
+ it and the sharded parameter contribute to parameter memory.
148
+ """
149
+
150
+ SHARDED = auto()
151
+ SHARDED_POST_FORWARD = auto()
152
+ UNSHARDED = auto()
153
+
154
+
155
+ @dataclass
156
+ class ParamModuleInfo:
157
+ """
158
+ For a parameter, this stores the module and the parameter name to be able
159
+ to do a parameter swap via ``setattr(module, param_name, ...)`` or to get
160
+ the parameter via ``getattr(module, param_name)``. We additionally save
161
+ shared modules and shared parameter names to update them accordingly.
162
+ """
163
+
164
+ # Parameter names are unprefixed, e.g. "weight", not "lin.weight"
165
+ module: nn.Module
166
+ param_name: str
167
+ shared_modules: List[nn.Module] = field(default_factory=list)
168
+ shared_param_names: List[str] = field(default_factory=list)
169
+
170
+
171
+ @dataclass
172
+ class ExtensionsData:
173
+ # User-defined metadata passed from pre to post-all-gather
174
+ all_gather_metadata: Optional[Any] = None
175
+ # Save the all-gather input sizes to unflatten the all-gather outputs to ND
176
+ all_gather_input_sizes: Sequence[torch.Size] = () # ND
177
+
178
+ def clear(self):
179
+ self.all_gather_metadata = None
180
+ self.all_gather_input_sizes = ()
181
+
182
+
183
+ class FSDPParam:
184
+ """
185
+ This class manages a parameter with FSDP or FSDP variants applied,
186
+ implementing dim-0 per-parameter sharding.
187
+ """
188
+
189
+ orig_dtype: torch.dtype
190
+ param_dtype: Optional[torch.dtype]
191
+ reduce_dtype: Optional[torch.dtype]
192
+ _orig_size: torch.Size # ND
193
+ sharded_size: torch.Size # ND
194
+ contiguous_sharded_stride: Tuple[int, ...]
195
+ padded_sharded_param_size: torch.Size # ND
196
+ sharded_post_forward_size: torch.Size # ND
197
+ contiguous_sharded_post_forward_stride: Tuple[int, ...]
198
+ _sharded_param_data: torch.Tensor # 1D
199
+ sharded_param: nn.Parameter # ND
200
+ _sharded_post_forward_param_data: Optional[torch.Tensor] # 1D
201
+ _sharded_post_forward_param: Optional[nn.Parameter] # ND
202
+ _unsharded_param: nn.Parameter # ND
203
+ unsharded_accumulated_grad: Optional[torch.Tensor] # ND
204
+ _sharding_spec: DTensorSpec
205
+ # DTensor attributes (only defined for DTensor `param`):
206
+ _tp_spec: DTensorSpec
207
+ all_gather_outputs: List[torch.Tensor] # 1D
208
+ # All-gather extension attributes
209
+ _extensions_data: ExtensionsData
210
+ _unsharded_inner_tensors: List[torch.Tensor]
211
+
212
+ def __init__(
213
+ self,
214
+ param: nn.Parameter,
215
+ module_info: ParamModuleInfo,
216
+ mesh_info: FSDPMeshInfo,
217
+ post_forward_mesh_info: Optional[FSDPMeshInfo],
218
+ device: torch.device,
219
+ mp_policy: MixedPrecisionPolicy,
220
+ offload_policy: OffloadPolicy,
221
+ ):
222
+ self._module_info: ParamModuleInfo = module_info
223
+ self.mesh_info = mesh_info
224
+ self.post_forward_mesh_info = post_forward_mesh_info
225
+ self.device = device
226
+ self.offload_to_cpu: bool = isinstance(offload_policy, CPUOffloadPolicy)
227
+ self.pin_memory = (
228
+ self.offload_to_cpu and cast(CPUOffloadPolicy, offload_policy).pin_memory
229
+ )
230
+ self.grad_offload_event: Optional[torch.cuda.Event] = None
231
+ self._init_sharded_param(param, device)
232
+ if self.post_forward_mesh_info:
233
+ self._init_sharded_post_forward_param_metadata(param)
234
+ self._init_extensions()
235
+ self.all_gather_outputs: List[torch.Tensor] = []
236
+ self.unsharded_accumulated_grad = None
237
+ self._param_fqn: Optional[str] = None # prefixed from root module
238
+ # TODO: Remove this padding logic once DTensor pads the local tensor:
239
+ # https://github.com/pytorch/pytorch/issues/113045
240
+ self._post_load_hook_handle = (
241
+ module_info.module.register_load_state_dict_post_hook(
242
+ lambda *args, **kwargs: self.reset_sharded_param()
243
+ )
244
+ )
245
+
246
+ @torch.no_grad()
247
+ def _init_sharded_param(self, param: nn.Parameter, device: torch.device):
248
+ if param.device != device and param.device.type != "meta":
249
+ raise AssertionError(
250
+ f"Expects the parameter to already be moved to device {device} but got {param.device}"
251
+ )
252
+ # TODO: Replace the sharded DTensor parameter construction logic with
253
+ # `distribute_tensor` after https://github.com/pytorch/pytorch/issues/116101
254
+ # TODO: Simplify the following sharded parameter padding logic after
255
+ # https://github.com/pytorch/pytorch/issues/113045
256
+ self.is_dtensor = isinstance(param, DTensor)
257
+ if self.is_dtensor:
258
+ self._tp_spec = cast(DTensor, param)._spec
259
+ dp_mesh, tp_mesh = (self.mesh_info.mesh, self._tp_spec.mesh)
260
+ dp_global_mesh = _mesh_resources.get_root_mesh(dp_mesh)
261
+ tp_global_mesh = _mesh_resources.get_root_mesh(tp_mesh)
262
+ if dp_global_mesh != tp_global_mesh or (
263
+ dp_global_mesh is None or tp_global_mesh is None
264
+ ):
265
+ raise AssertionError(
266
+ "FSDP requires the DP and TP mesh to have the same parent mesh but got: \n"
267
+ f"DP's global mesh: {dp_global_mesh}\nTP's global mesh: {tp_global_mesh}"
268
+ )
269
+
270
+ name_dims_error = "FSDP requires named DeviceMesh dims for ND parallelism"
271
+ assert dp_mesh.mesh_dim_names is not None, name_dims_error
272
+ assert tp_mesh.mesh_dim_names is not None, name_dims_error
273
+ submesh_names = dp_mesh.mesh_dim_names + tp_mesh.mesh_dim_names
274
+ self._spmd_mesh = dp_global_mesh[submesh_names]
275
+ if len(self._tp_spec.placements) != 1:
276
+ raise NotImplementedError(
277
+ f"FSDP only supports 1D TP, not {self._tp_spec.placements}"
278
+ )
279
+ split_factor = self._tp_spec.num_shards_map[0]
280
+ assert (
281
+ 2 <= self._spmd_mesh.ndim <= 3
282
+ ), f"_spmd_mesh.ndim can only be 2 or 3 but got {self._spmd_mesh.ndim}."
283
+ self._spmd_placements: Tuple[Placement, ...]
284
+ dp_shard_tp_placement = (
285
+ (
286
+ _StridedShard(0, split_factor=split_factor)
287
+ if split_factor > 1
288
+ else Shard(0)
289
+ ),
290
+ self._tp_spec.placements[0],
291
+ )
292
+ if self._spmd_mesh.ndim == 2:
293
+ self._spmd_placements = dp_shard_tp_placement
294
+ else:
295
+ assert self.mesh_info.replicate_mesh_dim == 0
296
+ self._spmd_placements = (Replicate(),) + dp_shard_tp_placement
297
+ self._sharding_spec = DTensorSpec(
298
+ self._spmd_mesh,
299
+ self._spmd_placements,
300
+ tensor_meta=self._tp_spec.tensor_meta,
301
+ )
302
+ # NOTE: FSDP+TP does not support uneven sharding for now
303
+ # TODO: enable uneven sharding for FSDP+TP
304
+ if split_factor > 1: # FSDP has strided sharding on tensor dim 0
305
+ num_shards = self._sharding_spec.num_shards_map[0]
306
+ tensor_size_dim_0 = self._sharding_spec.shape[0]
307
+ if tensor_size_dim_0 % num_shards != 0:
308
+ raise NotImplementedError(
309
+ "FSDP+TP sharding does not support uneven sharding for now: "
310
+ f"tensor dim 0 has size {tensor_size_dim_0} which cannot be "
311
+ f"evenly sharded into {num_shards} shards."
312
+ )
313
+
314
+ param_data = cast(DTensor, param)._local_tensor
315
+ else:
316
+ self._spmd_mesh = self.mesh_info.mesh
317
+ if isinstance(self.mesh_info, HSDPMeshInfo):
318
+ self._spmd_placements = (Replicate(), Shard(0))
319
+ else:
320
+ self._spmd_placements = (Shard(0),)
321
+ self._sharding_spec = DTensorSpec(
322
+ self._spmd_mesh,
323
+ self._spmd_placements,
324
+ tensor_meta=TensorMeta(
325
+ param.size(),
326
+ param.stride(),
327
+ param.dtype,
328
+ ),
329
+ )
330
+ param_data = param
331
+ self._orig_size = param_data.size()
332
+ self._contiguous_orig_stride = make_contiguous_strides_for(self._orig_size)
333
+ shard_rank = self.mesh_info.shard_mesh_rank
334
+ shard_world_size = self.mesh_info.shard_mesh_size
335
+ chunks = _chunk_with_empty(param_data, shard_world_size, dim=0)
336
+ sharded_param = chunks[shard_rank]
337
+ self.sharded_size = _get_dim0_chunked_size(sharded_param, param_data.size())
338
+ self.contiguous_sharded_stride = make_contiguous_strides_for(self.sharded_size)
339
+ padded_sharded_size = chunks[0].size() # 0th always padded
340
+ padded_sharded_param = param_data.new_zeros(padded_sharded_size)
341
+ self.padded_sharded_param_size = padded_sharded_param.size()
342
+ if sharded_param.numel() > 0:
343
+ padded_sharded_param[: sharded_param.size(0)].copy_(sharded_param)
344
+ if self.offload_to_cpu and not padded_sharded_param.is_meta:
345
+ padded_sharded_param = padded_sharded_param.cpu()
346
+ if self.pin_memory:
347
+ padded_sharded_param = padded_sharded_param.pin_memory()
348
+ self._sharded_param_data = padded_sharded_param.view(-1)
349
+ self.sharded_param = nn.Parameter(
350
+ self.to_sharded_dtensor(padded_sharded_param[: sharded_param.size(0)])
351
+ )
352
+ self.sharded_param.requires_grad_(param.requires_grad)
353
+ # Let `param_data` be freed normally when its ref count reaches 0 when
354
+ # the `fully_shard` call returns to allow provided parameters to alias
355
+ self._setattr_on_modules(self.sharded_param)
356
+ self.sharded_state = ShardedState.SHARDED
357
+
358
+ def _init_sharded_post_forward_param_metadata(self, param: torch.Tensor) -> None:
359
+ mesh_info = self.post_forward_mesh_info
360
+ assert mesh_info is not None # mypy
361
+ param_data = param._local_tensor if isinstance(param, DTensor) else param
362
+ chunks = _chunk_with_empty(param_data, mesh_info.shard_mesh_size, dim=0)
363
+ self.sharded_post_forward_size = _get_dim0_chunked_size(
364
+ chunks[mesh_info.shard_mesh_rank], param_data.size()
365
+ )
366
+ self.contiguous_sharded_post_forward_stride = make_contiguous_strides_for(
367
+ self.sharded_post_forward_size
368
+ )
369
+
370
+ def init_dtype_attrs(self, mp_policy: MixedPrecisionPolicy):
371
+ param_dtype, reduce_dtype = (mp_policy.param_dtype, mp_policy.reduce_dtype)
372
+ self.orig_dtype = self.sharded_param.dtype
373
+ # Clamp `param_dtype` to `None` if no casting is required
374
+ if param_dtype == self.orig_dtype:
375
+ param_dtype = None
376
+ self.param_dtype = param_dtype
377
+ self.reduce_dtype = reduce_dtype
378
+ # None indicates that the mixed precision is not enabled
379
+
380
+ def _init_extensions(self) -> None:
381
+ inner_tensor = self._sharded_local_tensor
382
+ has_fsdp_pre_all_gather = hasattr(inner_tensor, "fsdp_pre_all_gather")
383
+ has_fsdp_post_all_gather = hasattr(inner_tensor, "fsdp_post_all_gather")
384
+ if has_fsdp_pre_all_gather != has_fsdp_post_all_gather:
385
+ raise AssertionError(
386
+ "Both fsdp_pre_all_gather and fsdp_post_all_gather should be defined "
387
+ f"if using all-gather extensions: {inner_tensor}"
388
+ )
389
+ if has_fsdp_pre_all_gather:
390
+ if self.padded_sharded_param_size != self._sharded_local_tensor.size():
391
+ raise NotImplementedError(
392
+ "FSDP all-gather extensions require even sharding on dim-0.\n"
393
+ f"{self._orig_size} is not divisible by FSDP world size {self.mesh_info.mesh.size()}."
394
+ )
395
+ self._extensions_data = ExtensionsData()
396
+ self._unsharded_inner_tensors: List[torch.Tensor] = []
397
+
398
+ def init_all_gather_outputs(
399
+ self,
400
+ all_gather_input_numels: List[int],
401
+ all_gather_input_dtypes: List[torch.dtype],
402
+ world_size: int,
403
+ device: torch.device,
404
+ force_recreate: bool = False,
405
+ ):
406
+ if not force_recreate and len(self.all_gather_outputs) > 0:
407
+ return # already initialized
408
+ self.all_gather_outputs = [
409
+ torch.empty(torch.Size([numel * world_size]), dtype=dtype, device=device)
410
+ for numel, dtype in zip(all_gather_input_numels, all_gather_input_dtypes)
411
+ ]
412
+
413
+ def init_unsharded_param(self):
414
+ """
415
+ [Note: Invariants for torch.compile Traceable FSDP2]
416
+ 1. Under compile, we always re-populate the content of `self._unsharded_param`
417
+ per AllGather using the slow path.
418
+ 2. Under compile, we always recreate `self.all_gather_outputs` per AllGather.
419
+ This is to ensure the buffer creation is internal to the graph and
420
+ avoid `self.all_gather_outputs` being captured as a graph input.
421
+ 3. Under compile, at the end of `free_unsharded_param()`, we always clean up
422
+ `self.all_gather_outputs` and `self._unsharded_inner_tensors`,
423
+ to avoid them being captured as graph output.
424
+
425
+ With these invariants, only these tensors will be inputs to the graph:
426
+ - Sharded parameters
427
+ - Placeholders for the `self._unsharded_param` nn.Parameter
428
+ """
429
+ if not ca.compiled_autograd_enabled and hasattr(
430
+ self, "_unsharded_param"
431
+ ): # after the 1st all-gather
432
+ inner_tensor = self._sharded_local_tensor
433
+ if not hasattr(inner_tensor, "fsdp_post_all_gather"):
434
+ return # already initialized
435
+ for tensor in self._unsharded_inner_tensors:
436
+ alloc_storage(tensor)
437
+ all_gather_outputs = self._unflatten_all_gather_outputs()
438
+ inner_tensor.fsdp_post_all_gather(
439
+ all_gather_outputs,
440
+ self._extensions_data.all_gather_metadata,
441
+ self.param_dtype or self.orig_dtype,
442
+ out=self._unsharded_param,
443
+ )
444
+ self._extensions_data.clear()
445
+ return
446
+ inner_tensor = self._sharded_local_tensor
447
+ if not ca.compiled_autograd_enabled and hasattr(
448
+ inner_tensor, "fsdp_post_all_gather"
449
+ ):
450
+ all_gather_outputs = self._unflatten_all_gather_outputs()
451
+ (
452
+ unsharded_tensor,
453
+ self._unsharded_inner_tensors,
454
+ ) = inner_tensor.fsdp_post_all_gather(
455
+ all_gather_outputs,
456
+ self._extensions_data.all_gather_metadata,
457
+ self.param_dtype or self.orig_dtype,
458
+ )
459
+ self._extensions_data.clear()
460
+ else:
461
+ # For the default path (no post-all-gather), the all-gather output
462
+ # gives the unsharded parameter data directly
463
+ assert len(self.all_gather_outputs) == 1, f"{len(self.all_gather_outputs)}"
464
+ unsharded_tensor = self.all_gather_outputs[0]
465
+ unsharded_param = torch.as_strided(
466
+ unsharded_tensor,
467
+ self._orig_size,
468
+ self._contiguous_orig_stride,
469
+ storage_offset=0,
470
+ )
471
+ if self.is_dtensor:
472
+ unsharded_param = _from_local_no_grad(unsharded_param, self._tp_spec)
473
+ if hasattr(self, "_unsharded_param"):
474
+ assert ca.compiled_autograd_enabled
475
+ with torch.no_grad(), torch.autograd._unsafe_preserve_version_counter(
476
+ self._unsharded_param
477
+ ):
478
+ torch.ops.fsdp.set_.default(self._unsharded_param, unsharded_param)
479
+ else:
480
+ self._unsharded_param = nn.Parameter(
481
+ unsharded_param, requires_grad=self.sharded_param.requires_grad
482
+ )
483
+
484
+ def _unflatten_all_gather_outputs(self) -> Tuple[torch.Tensor, ...]:
485
+ return tuple(
486
+ t.view(-1, *s[1:])
487
+ for t, s in zip(
488
+ self.all_gather_outputs, self._extensions_data.all_gather_input_sizes
489
+ )
490
+ )
491
+
492
+ def to_sharded(self) -> None:
493
+ self._setattr_on_modules(self.sharded_param)
494
+ self.free_unsharded_param()
495
+ self.sharded_state = ShardedState.SHARDED
496
+
497
+ def to_sharded_post_forward(self) -> None:
498
+ if self.is_dtensor:
499
+ raise NotImplementedError(
500
+ "Resharding to smaller mesh with TP is not supported yet"
501
+ )
502
+ self._assert_in_states(ShardedState.UNSHARDED)
503
+ assert self.post_forward_mesh_info is not None # mypy
504
+ assert len(self.all_gather_outputs) == 1
505
+ shard_world_size = self.post_forward_mesh_info.shard_mesh_size
506
+ if (numel := self.all_gather_outputs[0].numel()) % shard_world_size != 0:
507
+ _raise_assert_with_print(
508
+ f"All-gather output size ({numel}) must be divisible by the shard "
509
+ f"world size ({shard_world_size})"
510
+ )
511
+ shard_rank = self.post_forward_mesh_info.shard_mesh_rank
512
+ sharded_numel = numel // shard_world_size
513
+ self._sharded_post_forward_param_data = (
514
+ self.all_gather_outputs[0].narrow(
515
+ 0, sharded_numel * shard_rank, sharded_numel
516
+ )
517
+ ).clone() # clone to be able to free all-gather output
518
+ sharded_post_forward_tensor = torch.as_strided(
519
+ self._sharded_post_forward_param_data,
520
+ size=self.sharded_post_forward_size,
521
+ stride=self.contiguous_sharded_post_forward_stride,
522
+ storage_offset=0,
523
+ )
524
+ self._sharded_post_forward_param = nn.Parameter(
525
+ self.to_sharded_post_forward_dtensor(sharded_post_forward_tensor)
526
+ )
527
+ self._setattr_on_modules(self._sharded_post_forward_param)
528
+ self.free_unsharded_param()
529
+ self.sharded_state = ShardedState.SHARDED_POST_FORWARD
530
+
531
+ def to_unsharded(self) -> None:
532
+ # Assume that the data has been allocated and all-gathered
533
+ set_requires_grad_if_needed(self.sharded_param, self._unsharded_param)
534
+ self._setattr_on_modules(self._unsharded_param)
535
+ if self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
536
+ # The data is allocated in the default stream via the post-forward
537
+ # reshard and must be kept alive for the next all-gather copy-in.
538
+ # Since we call this method after the copy-out, the data's lifetime
539
+ # is ensured without further synchronization.
540
+ self._sharded_post_forward_param = None
541
+ self._sharded_post_forward_param_data = None # free
542
+ self.sharded_state = ShardedState.UNSHARDED
543
+
544
+ def _setattr_on_modules(self, param: nn.Parameter) -> None:
545
+ unsafe_setattr_param(
546
+ self._module_info.module, self._module_info.param_name, param
547
+ )
548
+ for shared_module, shared_param_name in zip(
549
+ self._module_info.shared_modules, self._module_info.shared_param_names
550
+ ):
551
+ unsafe_setattr_param(shared_module, shared_param_name, param)
552
+
553
+ def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor:
554
+ """
555
+ Converts a local tensor representing either the sharded parameter or
556
+ sharded gradient to DTensor.
557
+ """
558
+ if tensor.shape != self.sharded_size:
559
+ _raise_assert_with_print(
560
+ f"Expects size {self.sharded_size} but got {tensor.shape}"
561
+ )
562
+ return _from_local_no_grad(
563
+ tensor,
564
+ self._sharding_spec,
565
+ )
566
+
567
+ def to_sharded_post_forward_dtensor(self, tensor: torch.Tensor) -> DTensor:
568
+ if tensor.shape != self.sharded_post_forward_size:
569
+ _raise_assert_with_print(
570
+ f"Expects size {self.sharded_post_forward_size} but got {tensor.shape}"
571
+ )
572
+ assert isinstance(self.post_forward_mesh_info, HSDPMeshInfo)
573
+ # TODO: Prefer this DTensor to be read-only and generalize the
574
+ # placement once we support TP.
575
+ post_forward_sharding_spec = DTensorSpec(
576
+ self.post_forward_mesh_info.mesh,
577
+ (Replicate(), Shard(0)),
578
+ tensor_meta=self._sharding_spec.tensor_meta,
579
+ )
580
+ return _from_local_no_grad(tensor, post_forward_sharding_spec)
581
+
582
+ def to_accumulated_grad_if_needed(self) -> None:
583
+ # Access `_unsharded_param` to bypass the sharded state check since we
584
+ # prefer to reshard before upcasting the gradient to save memory
585
+ if (
586
+ self.reduce_dtype is None
587
+ or self._unsharded_param.grad is None
588
+ or self._unsharded_param.grad.dtype == self.reduce_dtype
589
+ ):
590
+ return
591
+ unsharded_grad = self._unsharded_param.grad
592
+ self._unsharded_param.grad = None
593
+ self.unsharded_accumulated_grad = unsharded_grad.to(self.reduce_dtype)
594
+
595
+ def accumulate_unsharded_grad_if_needed(self) -> None:
596
+ if (
597
+ self.unsharded_accumulated_grad is not None
598
+ and self.unsharded_param.grad is not None
599
+ ):
600
+ self.unsharded_accumulated_grad += self.unsharded_param.grad
601
+ self.unsharded_param.grad = None
602
+
603
+ def alloc_all_gather_outputs(self) -> None:
604
+ for tensor in self.all_gather_outputs:
605
+ alloc_storage(tensor)
606
+
607
+ def free_unsharded_param(self) -> None:
608
+ for tensor in itertools.chain(
609
+ self.all_gather_outputs, self._unsharded_inner_tensors
610
+ ):
611
+ free_storage(tensor)
612
+ if ca.compiled_autograd_enabled:
613
+ self.all_gather_outputs = []
614
+ self._unsharded_inner_tensors = []
615
+
616
+ @property
617
+ def all_gather_inputs(self) -> List[torch.Tensor]: # 1D
618
+ self._assert_in_states(ShardedState.SHARDED, ShardedState.SHARDED_POST_FORWARD)
619
+ if self.sharded_state == ShardedState.SHARDED:
620
+ if not ca.compiled_autograd_enabled and hasattr(
621
+ self._sharded_local_tensor, "fsdp_pre_all_gather"
622
+ ):
623
+ sharded_local_tensor = self._sharded_local_tensor
624
+ if self.offload_to_cpu:
625
+ sharded_local_tensor = sharded_local_tensor.to(
626
+ self.device, non_blocking=True
627
+ )
628
+ (
629
+ all_gather_inputs,
630
+ self._extensions_data.all_gather_metadata,
631
+ ) = sharded_local_tensor.fsdp_pre_all_gather(self.mesh_info.mesh)
632
+ self._extensions_data.all_gather_input_sizes = [
633
+ t.size() for t in all_gather_inputs
634
+ ]
635
+ return [t.view(-1) for t in all_gather_inputs]
636
+ sharded_param_data = self._sharded_param_data
637
+ if self.offload_to_cpu:
638
+ sharded_param_data = sharded_param_data.to(
639
+ self.device, non_blocking=True
640
+ )
641
+ return [_to_dtype_if_needed(sharded_param_data, self.param_dtype)]
642
+ elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD:
643
+ if not ca.compiled_autograd_enabled and hasattr(
644
+ self._sharded_local_tensor, "fsdp_pre_all_gather"
645
+ ):
646
+ raise NotImplementedError
647
+ all_gather_input = _to_dtype_if_needed(
648
+ cast(torch.Tensor, self._sharded_post_forward_param_data),
649
+ self.param_dtype,
650
+ )
651
+ return [all_gather_input]
652
+ return [torch.empty(0)] # mypy
653
+
654
+ @property
655
+ def unsharded_param(self) -> nn.Parameter: # ND
656
+ self._assert_in_states(ShardedState.UNSHARDED)
657
+ return self._unsharded_param
658
+
659
+ @property
660
+ def unsharded_grad_data(self) -> torch.Tensor:
661
+ grad = self.unsharded_param.grad
662
+ assert grad is not None, "Expects unsharded_param.grad to not be None"
663
+ return self._get_grad_inner_tensor(grad)
664
+
665
+ @property
666
+ def unsharded_accumulated_grad_data(self) -> torch.Tensor:
667
+ grad = self.unsharded_accumulated_grad
668
+ assert grad is not None, "Expects unsharded_accumulated_grad to not be None"
669
+ return self._get_grad_inner_tensor(grad)
670
+
671
+ def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor:
672
+ if self.is_dtensor:
673
+ if isinstance(grad, AsyncCollectiveTensor):
674
+ grad = grad.wait()
675
+ assert isinstance(grad, DTensor), f"{type(grad)}"
676
+ if any(pl.is_partial() for pl in grad.placements):
677
+ placements = [
678
+ Replicate() if pl.is_partial() else pl for pl in grad.placements
679
+ ]
680
+ grad = grad.redistribute(placements=placements)
681
+ grad = grad._local_tensor
682
+ return grad
683
+
684
+ @property
685
+ def _sharded_local_tensor(self) -> torch.Tensor:
686
+ return cast(DTensor, self.sharded_param)._local_tensor
687
+
688
+ def _assert_in_states(self, *states: ShardedState) -> None:
689
+ if self.sharded_state not in states:
690
+ _raise_assert_with_print(
691
+ f"Expects to be in one of {states}, not {self.sharded_state}"
692
+ )
693
+
694
+ def reset_sharded_param(self):
695
+ # For ops like `nn.Module._apply` or `load_state_dict(assign=True)`
696
+ # that change the sharded parameter tensor, we may need to re-pad the
697
+ # sharded local tensor and re-save the reference.
698
+ module_info = self._module_info
699
+ new_param = getattr(module_info.module, module_info.param_name)
700
+ if new_param is not self.sharded_param:
701
+ if torch.__future__.get_swap_module_params_on_conversion():
702
+ raise AssertionError(
703
+ f"Expects swap_tensors to preserve object but got {new_param} "
704
+ f"instead of {self.sharded_param}"
705
+ )
706
+ self.sharded_param = new_param
707
+ local_tensor = new_param._local_tensor
708
+ if local_tensor.is_meta:
709
+ return
710
+ padded_sharded_size = self.padded_sharded_param_size
711
+ if local_tensor.size() != padded_sharded_size:
712
+ padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
713
+ padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
714
+ local_tensor = padded_local_tensor
715
+ if self.pin_memory and not local_tensor.is_pinned():
716
+ local_tensor = local_tensor.cpu().pin_memory()
717
+ self._sharded_param_data = local_tensor.view(-1)
718
+ assert isinstance(self.sharded_param, DTensor) # mypy
719
+ self.sharded_param._local_tensor = local_tensor[: self.sharded_size[0]]
720
+
721
+ def __repr__(self):
722
+ return f"FSDPParam(fqn={self._param_fqn}, orig_size={self._orig_size})"
723
+
724
+
725
+ def alloc_storage(tensor: torch.Tensor) -> None:
726
+ size = tensor.numel() * tensor.itemsize
727
+ if (storage := tensor.untyped_storage()).size() != size:
728
+ storage.resize_(size)
729
+
730
+
731
+ def free_storage(tensor: torch.Tensor) -> None:
732
+ if (storage := tensor.untyped_storage()).size() != 0:
733
+ storage.resize_(0)
734
+
735
+
736
+ # NOTE: These bypass `nn.Module.__setattr__` checks, which incur non-trivial
737
+ # CPU overhead, if the module did not override it. For FSDP, we know we do not
738
+ # need those checks when transitioning between sharded/unsharded parameters.
739
+ def unsafe_setattr_param(
740
+ module: nn.Module, param_name: str, param: nn.Parameter
741
+ ) -> None:
742
+ if getattr(module.__setattr__, "__func__", None) is nn.Module.__setattr__:
743
+ module._parameters[param_name] = param
744
+ else: # slow path
745
+ setattr(module, param_name, param)
746
+
747
+
748
+ def set_requires_grad_if_needed(
749
+ src_tensor: torch.Tensor, dst_tensor: torch.Tensor
750
+ ) -> None:
751
+ # Only call `requires_grad_` if needed to avoid the Python <> C++ context
752
+ # switch overhead
753
+ if src_tensor.requires_grad != dst_tensor.requires_grad:
754
+ dst_tensor.requires_grad_(src_tensor.requires_grad)
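Editorial aside: the `free_storage`/`alloc_storage` helpers above implement the storage-resizing trick from [Note: FSDP and autograd]. Because only the untyped storage is resized, every previously created view (for example one that autograd saved for backward) keeps aliasing the parameter once the storage is re-grown and refilled. A small standalone illustration with plain CPU tensors, not part of the diffed files:

import torch

base = torch.arange(8, dtype=torch.float32)
view = base.view(2, 4)                       # stands in for a saved-for-backward view
storage = base.untyped_storage()
storage.resize_(0)                           # "free": data released, tensor metadata survives
assert base.untyped_storage().size() == 0
storage.resize_(8 * base.element_size())     # "alloc": re-grow to the original byte size
base.copy_(torch.ones(8))                    # write freshly all-gathered data in place
assert view.shape == (2, 4) and bool((view == 1).all())  # the old view still aliases the data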
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_param_group.py ADDED
@@ -0,0 +1,614 @@
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import logging
4
+ from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple
5
+
6
+ import torch
7
+ import torch._dynamo.compiled_autograd as ca
8
+ import torch.distributed as dist
9
+ import torch.nn as nn
10
+ from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates
11
+ from torch.profiler import record_function
12
+ from torch.utils._pytree import tree_flatten, tree_unflatten
13
+ from torch.utils.hooks import RemovableHandle
14
+
15
+ from ._fsdp_api import MixedPrecisionPolicy, OffloadPolicy
16
+ from ._fsdp_collectives import (
17
+ AllGatherResult,
18
+ foreach_all_gather,
19
+ foreach_all_gather_copy_out,
20
+ foreach_reduce,
21
+ )
22
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo, TrainingState
23
+ from ._fsdp_param import FSDPParam, ParamModuleInfo, ShardedState
24
+
25
+
26
+ logger = logging.getLogger("torch.distributed._composable.fsdp")
27
+
28
+ _ModuleToHandleDict = Dict[nn.Module, RemovableHandle] # for state dict
29
+
30
+
31
+ """
32
+ [Note: Overlapping all-gather copy-in and all-gather]
33
+ For implicit forward prefetching, we want to overlap the next copy-in with the
34
+ current all-gather. We do so using a separate copy-in stream. However, since
35
+ we have the all-gather input as a view into the output, we must make sure to
36
+ copy into different memory from the current all-gather's output. Thus, we keep
37
+ a reference to the current all-gather's output and have the next FSDP parameter
38
+ group free it after its copy-in. Finally, we have the last FSDP state flush the
39
+ reference to avoid holding onto memory after forward.
40
+ """
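# Editor's sketch (not part of this file): the event-based handoff that the
# note above and `FSDPCommContext`/`_wait_all_gather_streams_on_event` below
# rely on. An event recorded on the default stream after the copy-out gates
# the copy-in/all-gather streams, so the previous all-gather buffer is only
# reused or freed once every consumer stream has caught up. The stream and
# event names here are illustrative, and the snippet needs a CUDA device.
import torch

if torch.cuda.is_available():
    default_stream = torch.cuda.current_stream()
    all_gather_copy_in_stream = torch.cuda.Stream(priority=-1)
    all_gather_stream = torch.cuda.Stream(priority=-1)

    copy_out_event = torch.cuda.Event()
    copy_out_event.record(default_stream)  # end of this group's copy-out
    # The next group's copy-in and all-gather must not reuse the shared
    # buffers before the recorded point, so both streams wait on the event:
    all_gather_copy_in_stream.wait_event(copy_out_event)
    all_gather_stream.wait_event(copy_out_event)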
41
+
42
+
43
+ class FSDPCommContext:
44
+ """This has the communication state shared across FSDP states/parameter groups."""
45
+
46
+ def lazy_init(self):
47
+ if not torch.cuda.is_available():
48
+ raise RuntimeError("FSDP requires CUDA for streams")
49
+ # Setting the all-gather/reduce-scatter streams to be higher priority
50
+ # can help avoid some issues where their copies in/out are delayed and
51
+ # block computation (this is different from high-pri NCCL streams)
52
+ high_priority = -1
53
+ # All-gather state and copy-in stream allow overlapping the next
54
+ # copy-in with the current all-gather in forward; copy-in overlaps with
55
+ # reduce-scatter in backward without the separate copy-in stream
56
+ self.all_gather_copy_in_stream = torch.cuda.Stream(priority=high_priority)
57
+ # All-gather stream allows overlapping next all-gather with current
58
+ # forward compute
59
+ self.all_gather_stream = torch.cuda.Stream(priority=high_priority)
60
+ # Reduce-scatter stream gives separate execution "thread" for post-
61
+ # backward logic like pre/post-gradient division and reduce-scatter
62
+ self.reduce_scatter_stream = torch.cuda.Stream(priority=high_priority)
63
+ # Run the HSDP all-reduces concurrently with all-gather/reduce-scatter
64
+ # since collectives use different network resources and can overlap
65
+ # in the typical intra-node sharding / inter-node replication case
66
+ self.all_reduce_stream = torch.cuda.Stream()
67
+ # All-gather/reduce-scatter states keep references to collective
68
+ # tensors produced in one stream and used in another and accompanying
69
+ # CUDA events for synchronization
70
+ self.all_gather_state: Optional[AllGatherState] = None
71
+ self.reduce_scatter_state: Optional[ReduceScatterState] = None
72
+ # Post-forward order for explicit backward prefetching
73
+ self.post_forward_order: List[FSDPParamGroup] = [] # will cause ref cycles
74
+
75
+ def get_all_gather_streams(
76
+ self, training_state: TrainingState
77
+ ) -> Tuple[torch.cuda.Stream, torch.cuda.Stream]:
78
+ if training_state in (TrainingState.FORWARD, TrainingState.PRE_BACKWARD):
79
+ # Use separate streams for implicit prefetching
80
+ return self.all_gather_copy_in_stream, self.all_gather_stream
81
+ current_stream = torch.cuda.current_stream()
82
+ return current_stream, current_stream
83
+
84
+
85
+ # See [Note: Overlapping all-gather copy-in and all-gather]
86
+ class AllGatherState(NamedTuple):
87
+ all_gather_result: AllGatherResult
88
+ event: torch.cuda.Event # all-gather copy-out
89
+
90
+
91
+ class ReduceScatterState(NamedTuple):
92
+ reduce_scatter_input: torch.Tensor
93
+ event: torch.cuda.Event # reduce-scatter event
94
+
95
+
96
+ class FSDPParamGroup:
97
+ """This class represents a parameter group to communicate together."""
98
+
99
+ _orig_dtype: torch.dtype
100
+ _reduce_dtype: Optional[torch.dtype]
101
+
102
+ def __init__(
103
+ self,
104
+ params: List[nn.Parameter],
105
+ modules: Tuple[nn.Module, ...],
106
+ mesh_info: FSDPMeshInfo,
107
+ post_forward_mesh_info: Optional[FSDPMeshInfo],
108
+ device: torch.device,
109
+ mp_policy: MixedPrecisionPolicy,
110
+ offload_policy: OffloadPolicy,
111
+ ):
112
+ self.modules = modules # permit ref cycle because 1:1 lifetime
113
+ param_module_infos = _get_param_module_infos(params, modules)
114
+ self.fsdp_params = [
115
+ FSDPParam(
116
+ param,
117
+ module_info,
118
+ mesh_info,
119
+ post_forward_mesh_info,
120
+ device,
121
+ mp_policy,
122
+ offload_policy,
123
+ )
124
+ for param, module_info in zip(params, param_module_infos)
125
+ ]
126
+ self.mesh_info = mesh_info
127
+ self.post_forward_mesh_info = post_forward_mesh_info
128
+ self.device = device
129
+ self.mp_policy = mp_policy
130
+ self._training_state = TrainingState.IDLE
131
+ # Group's sharded state always matches its parameters' sharded states
132
+ self._sharded_state = ShardedState.SHARDED
133
+ self._module_fqn: Optional[str] = None # prefixed from root module
134
+ # Only consider resetting sharded parameters once in lazy init since it
135
+ # can incur nontrivial overhead to reset them
136
+ self._reset_sharded_params: bool = False
137
+
138
+ # - Hook state
139
+ self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {}
140
+ self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {}
141
+
142
+ # - Communication and communication/computation overlap
143
+ self.comm_ctx = FSDPCommContext()
144
+ # Group's indices in the shared post-forward order
145
+ self._post_forward_indices: List[int] = []
146
+ # Whether to reduce gradients at all (whether for FSDP or HSDP)
147
+ self.reduce_grads: bool = True
148
+ # Whether to all-reduce gradients for HSDP; only used if
149
+ # `self.reduce_grads` is true, in which case setting this to false
150
+ # means reduce-scatter but no all-reduce
151
+ self.all_reduce_grads: bool = True
152
+ # Whether to reshard parameters after backward (only useful for
153
+ # gradient accumulation)
154
+ self.reshard_after_backward: bool = True
155
+ # Optional custom reduce-scatter reduce op (e.g. to divide by a
156
+ # factor other than the shard world size)
157
+ self.reduce_scatter_reduce_op: Optional[dist.ReduceOp] = None
158
+
159
+ # - CUDA events for stream synchronization
160
+ # Holds the all-gather output buffer, sync objects, and metadata
161
+ self._all_gather_result: Optional[AllGatherResult] = None
162
+ # Holds the reduce-scatter/all-reduce view-out CUDA event that marks the end of
163
+ # the group's post-backward (e.g. reduce-scatter, all-reduce and div), which
164
+ # should be waited on at the end of backward
165
+ self._post_reduce_event: Optional[torch.cuda.Event] = None
166
+ # Holds the reshard-after-forward CUDA event when resharding to a
167
+ # different world size, which should be waited on in the next unshard
168
+ self._reshard_after_forward_event: Optional[torch.cuda.Event] = None
169
+
170
+ # Only for HSDP, if accumulating gradients without all-reduce, save the
171
+ # partial reduce output (only reduce-scattered but not all-reduced)
172
+ self._partial_reduce_output: Optional[torch.Tensor] = None
173
+
174
+ # Initialization #
175
+ def _init_mp_dtypes(self) -> None:
176
+ for fsdp_param in self.fsdp_params:
177
+ fsdp_param.init_dtype_attrs(self.mp_policy)
178
+ orig_dtypes = {fsdp_param.orig_dtype for fsdp_param in self.fsdp_params}
179
+ if len(orig_dtypes) != 1:
180
+ # This can be relaxed if we copy-out for the reduce-scatter
181
+ raise AssertionError(
182
+ f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
183
+ )
184
+ self._orig_dtype = next(iter(orig_dtypes))
185
+ reduce_dtypes = {fsdp_param.reduce_dtype for fsdp_param in self.fsdp_params}
186
+ if len(reduce_dtypes) != 1:
187
+ # This can be relaxed if we issue one reduce-scatter per reduce
188
+ # dtype (but we would need a way for users to specify multiple
189
+ # reduce dtypes)
190
+ raise AssertionError(
191
+ f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
192
+ )
193
+ self._reduce_dtype = next(iter(reduce_dtypes))
194
+
195
+ def lazy_init(self):
196
+ # Lazy init should be idempotent
197
+ # Users may change or register parameters after construction time.
198
+ # For example, DoRA (https://arxiv.org/abs/2402.09353) initializes linear magnitudes based on
199
+ # other parameters (e.g. loaded from the state dict).
200
+ if self.is_sharded and not self._reset_sharded_params:
201
+ for fsdp_param in self.fsdp_params:
202
+ fsdp_param.reset_sharded_param()
203
+ self._reset_sharded_params = True
204
+ param_names_on_meta = [
205
+ fsdp_param._param_fqn
206
+ for fsdp_param in self.fsdp_params
207
+ if fsdp_param.sharded_param.device.type == "meta"
208
+ ]
209
+ if param_names_on_meta:
210
+ raise RuntimeError(
211
+ "FSDP parameters should be materialized from meta device before training, "
212
+ f"but the following were still on meta device: {param_names_on_meta}\n"
213
+ "For example, call module.to_empty(device) to materialize to device and "
214
+ "call module.reset_parameters() on each module to initialize values."
215
+ )
216
+ # Initialize mixed precision attributes lazily in case the user changes
217
+ # the parameter dtypes after construction time but before forward
218
+ self._init_mp_dtypes()
219
+ self._register_state_dict_hooks()
220
+
221
+ # Runtime #
222
+ def unshard(self, async_op: bool = False):
223
+ if self._all_gather_result is not None: # already called, pending wait
224
+ return
225
+ if self.is_unsharded:
226
+ return # no-op
227
+ if self._reshard_after_forward_event is not None:
228
+ # Resharded parameter data is allocated in the default stream and
229
+ # used in the all-gather streams
230
+ self._wait_all_gather_streams_on_event(self._reshard_after_forward_event)
231
+ self._reshard_after_forward_event = None
232
+ with record_function(self._with_fqn("FSDP::all_gather")):
233
+ self._all_gather_result = foreach_all_gather(
234
+ self.fsdp_params,
235
+ self._all_gather_process_group,
236
+ async_op,
237
+ *self.comm_ctx.get_all_gather_streams(self._training_state),
238
+ self.device,
239
+ )
240
+
241
+ def wait_for_unshard(self):
242
+ """
243
+ 1. In forward with implicit prefetching, to overlap the current copy-out
244
+ with the next all-gather, we save a reference to the current all-gather
245
+ result to free after the next copy-out.
246
+ 2. Otherwise (explicit prefetching or in backward), we free the
247
+ all-gather result immediately after the current copy-out since we can
248
+ already overlap the current copy-out with the previous reduce-scatter.
249
+ """
250
+ if not self._all_gather_result:
251
+ return # no preceding unshard
252
+ if self._training_state == TrainingState.FORWARD: # implicit prefetch
253
+ if prev_all_gather_state := self.comm_ctx.all_gather_state:
254
+ self._wait_all_gather_streams_on_event(prev_all_gather_state.event)
255
+ self.comm_ctx.all_gather_state = None # free the all-gather result
256
+ with record_function(self._with_fqn("FSDP::all_gather_copy_out")):
257
+ foreach_all_gather_copy_out(
258
+ self._all_gather_result,
259
+ self.fsdp_params,
260
+ self._all_gather_process_group,
261
+ )
262
+ for fsdp_param in self.fsdp_params:
263
+ fsdp_param.init_unsharded_param()
264
+ self._to_unsharded()
265
+ all_gather_copy_out_event = torch.cuda.Event()
266
+ all_gather_copy_out_event.record()
267
+ if self._training_state == TrainingState.FORWARD:
268
+ self.comm_ctx.all_gather_state = AllGatherState(
269
+ self._all_gather_result, all_gather_copy_out_event
270
+ )
271
+ else:
272
+ self._wait_all_gather_streams_on_event(all_gather_copy_out_event)
273
+ self._all_gather_result = None # free unless saved in `all_gather_state`
274
+
275
+ def _wait_all_gather_streams_on_event(self, event: torch.cuda.Event):
276
+ # Calling `unshard` before lazy init means streams are not initialized
277
+ if hasattr(self.comm_ctx, "all_gather_copy_in_stream"):
278
+ self.comm_ctx.all_gather_copy_in_stream.wait_event(event)
279
+ if hasattr(self.comm_ctx, "all_gather_stream"):
280
+ self.comm_ctx.all_gather_stream.wait_event(event)
281
+
282
+ def reshard(self):
283
+ if self._training_state == TrainingState.FORWARD:
284
+ if not self._reshard_after_forward:
285
+ return
286
+ if self._use_post_forward_mesh:
287
+ self._to_sharded_post_forward()
288
+ self._reshard_after_forward_event = torch.cuda.Event()
289
+ self._reshard_after_forward_event.record()
290
+ return
291
+ self._to_sharded()
292
+
293
+ def pre_forward(
294
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
295
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
296
+ if not ca.compiled_autograd_enabled:
297
+ logger.debug("%s", self._with_fqn("FSDP::pre_forward"))
298
+ with record_function(self._with_fqn("FSDP::pre_forward")):
299
+ self._training_state = TrainingState.FORWARD
300
+ self.unshard()
301
+ self.wait_for_unshard()
302
+ args, kwargs = self._register_post_backward_hook(args, kwargs)
303
+ return args, kwargs
304
+
305
+ def post_forward(self, module: nn.Module, input: Any, output: Any):
306
+ if not ca.compiled_autograd_enabled:
307
+ logger.debug("%s", self._with_fqn("FSDP::post_forward"))
308
+ with record_function(self._with_fqn("FSDP::post_forward")):
309
+ self.reshard()
310
+ self._record_post_forward()
311
+ self._training_state = TrainingState.IDLE
312
+ return output
313
+
314
+ def _record_post_forward(self) -> None:
315
+ # Since a group has one pre-backward unshard for each forward call
316
+ # before the backward, we record each usage (with multiplicity)
317
+ post_forward_index = len(self.comm_ctx.post_forward_order)
318
+ self.comm_ctx.post_forward_order.append(self)
319
+ self._post_forward_indices.append(post_forward_index)
320
+
321
+ def pre_backward(self, default_prefetch: bool, *unused: Any):
322
+ if self._training_state == TrainingState.PRE_BACKWARD:
323
+ return
324
+ if not ca.compiled_autograd_enabled:
325
+ logger.debug("%s", self._with_fqn("FSDP::pre_backward"))
326
+ with record_function(self._with_fqn("FSDP::pre_backward")):
327
+ self._training_state = TrainingState.PRE_BACKWARD
328
+ self.unshard() # no-op if prefetched
329
+ self.wait_for_unshard()
330
+ if default_prefetch and not ca.compiled_autograd_enabled:
331
+ self._backward_prefetch()
332
+
333
+ def post_backward(self, *unused: Any):
334
+ if not ca.compiled_autograd_enabled:
335
+ logger.debug("%s", self._with_fqn("FSDP::post_backward"))
336
+ self._training_state = TrainingState.POST_BACKWARD
337
+ with record_function(self._with_fqn("FSDP::post_backward_accumulate")):
338
+ for fsdp_param in self.fsdp_params:
339
+ fsdp_param.accumulate_unsharded_grad_if_needed()
340
+ with record_function(self._with_fqn("FSDP::post_backward_reshard")):
341
+ if not self.reduce_grads:
342
+ if self.reshard_after_backward:
343
+ self.reshard()
344
+ for fsdp_param in self.fsdp_params:
345
+ fsdp_param.to_accumulated_grad_if_needed()
346
+ return
347
+ # Save the autograd-computed gradients before resharding to only
348
+ # access the unsharded parameters when their data is present
349
+ fsdp_params_with_grad: List[FSDPParam] = []
350
+ unsharded_grads: List[torch.Tensor] = []
351
+ for fsdp_param in self.fsdp_params:
352
+ # May have an accumulated gradient of the reduce dtype if the
353
+ # previous backward did not reduce-scatter
354
+ if fsdp_param.unsharded_accumulated_grad is not None:
355
+ fsdp_params_with_grad.append(fsdp_param)
356
+ unsharded_grads.append(fsdp_param.unsharded_accumulated_grad_data)
357
+ fsdp_param.unsharded_accumulated_grad = None
358
+ elif fsdp_param.unsharded_param.grad is not None:
359
+ fsdp_params_with_grad.append(fsdp_param)
360
+ unsharded_grads.append(fsdp_param.unsharded_grad_data)
361
+ fsdp_param.unsharded_param.grad = None
362
+ if self.reshard_after_backward:
363
+ self.reshard()
364
+ if len(fsdp_params_with_grad) == 0:
365
+ return
366
+ with record_function(self._with_fqn("FSDP::post_backward_reduce")):
367
+ if self.comm_ctx.reduce_scatter_state is not None:
368
+ torch.cuda.current_stream().wait_event(
369
+ self.comm_ctx.reduce_scatter_state.event
370
+ )
371
+ self.comm_ctx.reduce_scatter_state = None
372
+ (
373
+ reduce_scatter_input,
374
+ reduce_scatter_event,
375
+ self._post_reduce_event,
376
+ self._partial_reduce_output,
377
+ ) = foreach_reduce(
378
+ fsdp_params_with_grad,
379
+ unsharded_grads,
380
+ self._reduce_scatter_process_group,
381
+ self.comm_ctx.reduce_scatter_stream,
382
+ self._orig_dtype,
383
+ self._reduce_dtype,
384
+ self.device,
385
+ self.reduce_scatter_reduce_op,
386
+ self._all_reduce_process_group if self._is_hsdp else None,
387
+ self.comm_ctx.all_reduce_stream,
388
+ self.all_reduce_grads,
389
+ self._partial_reduce_output,
390
+ )
391
+ self.comm_ctx.reduce_scatter_state = ReduceScatterState(
392
+ reduce_scatter_input, reduce_scatter_event
393
+ )
394
+
395
+ def finalize_backward(self):
396
+ if self._post_reduce_event is not None:
397
+ torch.cuda.current_stream().wait_event(self._post_reduce_event)
398
+ self._post_reduce_event = None
399
+ for fsdp_param in self.fsdp_params:
400
+ if fsdp_param.grad_offload_event is not None:
401
+ fsdp_param.grad_offload_event.synchronize()
402
+ fsdp_param.grad_offload_event = None
403
+ self._post_forward_indices.clear()
404
+
405
+ def _backward_prefetch(self) -> None:
406
+ if self._training_state == TrainingState.PRE_BACKWARD:
407
+ if not self._post_forward_indices:
408
+ # Can be cleared if running multiple `backward`s
409
+ return
410
+ curr_index = self._post_forward_indices.pop()
411
+ if (target_index := curr_index - 1) < 0:
412
+ return
413
+ # Prefetch naively using the reverse post-forward order, which may
414
+ # have mistargeted prefetches if not all modules used in forward
415
+ # are used in this backward
416
+ target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index]
417
+ self._prefetch_unshard(target_fsdp_param_group, "backward")
418
+
419
+ @staticmethod
420
+ def _prefetch_unshard(
421
+ target_fsdp_param_group: "FSDPParamGroup", pass_type: str
422
+ ) -> None:
423
+ if pass_type == "backward":
424
+ training_state = TrainingState.PRE_BACKWARD
425
+ elif pass_type == "forward":
426
+ training_state = TrainingState.FORWARD
427
+ else:
428
+ raise ValueError(f"Unknown pass type: {pass_type}")
429
+ target_fqn = target_fsdp_param_group._module_fqn
430
+ with record_function(
431
+ f"FSDP::{pass_type}_prefetch for {target_fqn}"
432
+ ), target_fsdp_param_group.use_training_state(training_state):
433
+ target_fsdp_param_group.unshard()
434
+
435
+ # Utilities #
436
+ def _to_sharded(self):
437
+ if not self.is_sharded:
438
+ for fsdp_param in self.fsdp_params:
439
+ fsdp_param.to_sharded()
440
+ self._sharded_state = ShardedState.SHARDED
441
+
442
+ def _to_sharded_post_forward(self):
443
+ if not self.is_sharded_post_forward:
444
+ for fsdp_param in self.fsdp_params:
445
+ fsdp_param.to_sharded_post_forward()
446
+ self._sharded_state = ShardedState.SHARDED_POST_FORWARD
447
+
448
+ def _to_unsharded(self):
449
+ if not self.is_unsharded:
450
+ for fsdp_param in self.fsdp_params:
451
+ fsdp_param.to_unsharded()
452
+ self._sharded_state = ShardedState.UNSHARDED
453
+
454
+ @property
455
+ def is_sharded(self) -> bool:
456
+ return self._sharded_state == ShardedState.SHARDED
457
+
458
+ @property
459
+ def is_sharded_post_forward(self) -> bool:
460
+ return self._sharded_state == ShardedState.SHARDED_POST_FORWARD
461
+
462
+ @property
463
+ def is_unsharded(self) -> bool:
464
+ return self._sharded_state == ShardedState.UNSHARDED
465
+
466
+ @contextlib.contextmanager
467
+ def use_training_state(self, training_state: TrainingState):
468
+ old_training_state = self._training_state
469
+ self._training_state = training_state
470
+ try:
471
+ yield
472
+ finally:
473
+ self._training_state = old_training_state
474
+
475
+ # Hook Registration #
476
+ def _register_post_backward_hook(
477
+ self, args: Tuple[Any, ...], kwargs: Dict[str, Any]
478
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
479
+ # Compile relies on `root_post_backward_callback` to call each
480
+ # `FSDPParamGroup.post_backward`
481
+ if ca.compiled_autograd_enabled:
482
+ return args, kwargs
483
+ if not torch.is_grad_enabled():
484
+ return args, kwargs
485
+ args_list, args_spec = tree_flatten(args)
486
+ kwargs_list, kwargs_spec = tree_flatten(kwargs)
487
+ args_kwargs_list = list(args_list) + list(kwargs_list)
488
+ inp_tensor_indices: List[int] = []
489
+ inp_tensors: List[torch.Tensor] = []
490
+ for i, obj in enumerate(args_kwargs_list):
491
+ if torch.is_tensor(obj) and obj.requires_grad:
492
+ inp_tensor_indices.append(i)
493
+ inp_tensors.append(obj)
494
+ if len(inp_tensors) == 0:
495
+ return args, kwargs # no tensors that require gradients
496
+ inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors)
497
+ for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors):
498
+ args_kwargs_list[inp_tensor_idx] = inp_tensor
499
+ args_list = args_kwargs_list[: len(args_list)]
500
+ kwargs_list = args_kwargs_list[len(args_list) :]
501
+ args = tree_unflatten(args_list, args_spec)
502
+ kwargs = tree_unflatten(kwargs_list, kwargs_spec)
503
+ return args, kwargs
504
+
505
+ def _register_state_dict_hooks(self) -> None:
506
+ num_pre_save_hooks = len(self._module_to_pre_save_state_dict_hook_handle)
507
+ num_pre_load_hooks = len(self._module_to_pre_load_state_dict_hook_handle)
508
+ assert (
509
+ num_pre_save_hooks == num_pre_load_hooks
510
+ ), f"Pre-save: {num_pre_save_hooks} pre-load: {num_pre_load_hooks}"
511
+ if num_pre_save_hooks > 0:
512
+ return # already registered
513
+ modules_with_fsdp_params: Set[nn.Module] = {
514
+ fsdp_param._module_info.module for fsdp_param in self.fsdp_params
515
+ }
516
+
517
+ def to_sharded_hook(*args: Any, **kwargs: Any) -> None:
518
+ self._to_sharded()
519
+
520
+ for module in modules_with_fsdp_params:
521
+ self._module_to_pre_save_state_dict_hook_handle[
522
+ module
523
+ ] = module.register_state_dict_pre_hook(to_sharded_hook)
524
+ self._module_to_pre_load_state_dict_hook_handle[
525
+ module
526
+ ] = module._register_load_state_dict_pre_hook(to_sharded_hook)
527
+
528
+ # Properties #
529
+ @property
530
+ def _reshard_after_forward(self) -> bool:
531
+ return self.post_forward_mesh_info is not None
532
+
533
+ @property
534
+ def _use_post_forward_mesh(self) -> bool:
535
+ return (
536
+ self._reshard_after_forward
537
+ and self.mesh_info != self.post_forward_mesh_info
538
+ )
539
+
540
+ @property
541
+ def _is_hsdp(self) -> bool:
542
+ return isinstance(self.mesh_info, HSDPMeshInfo)
543
+
544
+ @property
545
+ def _all_gather_process_group(self) -> dist.ProcessGroup:
546
+ mesh_info = (
547
+ cast(FSDPMeshInfo, self.post_forward_mesh_info)
548
+ if self.is_sharded_post_forward
549
+ else self.mesh_info
550
+ )
551
+ assert isinstance(mesh_info, FSDPMeshInfo)
552
+ return mesh_info.shard_process_group
553
+
554
+ @property
555
+ def _reduce_scatter_process_group(self) -> dist.ProcessGroup:
556
+ assert isinstance(self.mesh_info, FSDPMeshInfo)
557
+ return self.mesh_info.shard_process_group
558
+
559
+ @property
560
+ def _all_reduce_process_group(self) -> dist.ProcessGroup:
561
+ assert isinstance(self.mesh_info, HSDPMeshInfo)
562
+ return self.mesh_info.replicate_process_group
563
+
564
+ def _with_fqn(self, label: str) -> str:
565
+ if self._module_fqn:
566
+ return f"{label} ({self._module_fqn})"
567
+ return label
568
+
569
+ def __repr__(self):
570
+ return f"FSDPParamGroup(fqn={self._module_fqn})"
571
+
572
+
573
+ def _get_param_module_infos(
574
+ params: List[nn.Parameter], modules: Tuple[nn.Module, ...]
575
+ ) -> List[ParamModuleInfo]:
576
+ """
577
+ Shared parameter: lin1.weight = lin2.weight
578
+ Shared module: mlp.lin1 = mlp.lin2
579
+ We do not remove duplicates when traversing both modules and parameters to
580
+ find shared modules' parameters and shared parameters within a module.
581
+ """
582
+ params_set = set(params)
583
+ param_to_module_info: Dict[nn.Parameter, ParamModuleInfo] = {}
584
+ for module in modules:
585
+ for _, submodule in module.named_modules(remove_duplicate=False):
586
+ for param_name, param in _named_parameters_with_duplicates(
587
+ submodule, recurse=False
588
+ ):
589
+ if param in params_set:
590
+ if param not in param_to_module_info:
591
+ param_to_module_info[param] = ParamModuleInfo(
592
+ submodule, param_name
593
+ )
594
+ else:
595
+ param_to_module_info[param].shared_modules.append(submodule)
596
+ param_to_module_info[param].shared_param_names.append(
597
+ param_name
598
+ )
599
+ if len(param_to_module_info) != len(params):
600
+ raise AssertionError(f"Some parameters are not in the module tree of {module}")
601
+ return [param_to_module_info[param] for param in params]
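# A minimal standalone sketch of the shared-parameter and shared-module cases
# that the traversal above records (the TiedMLP class is illustrative, not
# part of this file); it assumes nothing beyond torch being installed.
import torch.nn as nn

class TiedMLP(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.lin1 = nn.Linear(4, 4)
        self.lin2 = nn.Linear(4, 4)
        self.lin2.weight = self.lin1.weight  # shared parameter across modules
        self.lin3 = self.lin1                # shared module under two names

m = TiedMLP()
# remove_duplicate=False keeps every (module, name) occurrence, which is what
# lets the code above append shared_modules/shared_param_names entries.
names = [name for name, _ in m.named_parameters(remove_duplicate=False)]
# e.g. ['lin1.weight', 'lin1.bias', 'lin2.weight', 'lin2.bias',
#       'lin3.weight', 'lin3.bias']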
602
+
603
+
604
+ class RegisterPostBackwardFunction(torch.autograd.Function):
605
+ @staticmethod
606
+ def forward(ctx, param_group: FSDPParamGroup, *inputs: torch.Tensor):
607
+ # All tensors in `inputs` should require gradient
608
+ ctx.param_group = param_group
609
+ return inputs
610
+
611
+ @staticmethod
612
+ def backward(ctx, *grads: torch.Tensor):
613
+ ctx.param_group.post_backward()
614
+ return (None,) + grads
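# A minimal standalone sketch of the pattern RegisterPostBackwardFunction uses:
# an identity autograd.Function whose backward runs a side-effecting callback
# once gradients for the wrapped inputs have been computed (names below are
# illustrative only).
import torch

class RunCallbackInBackward(torch.autograd.Function):
    @staticmethod
    def forward(ctx, callback, *inputs):
        ctx.callback = callback
        return inputs  # identity on the tensor inputs

    @staticmethod
    def backward(ctx, *grads):
        ctx.callback()  # e.g. reshard parameters / kick off gradient reduction
        return (None,) + grads  # no gradient for the callback argument

x = torch.randn(3, requires_grad=True)
(y,) = RunCallbackInBackward.apply(lambda: print("post-backward"), x)
y.sum().backward()  # the callback runs as part of this backward pass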
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/_fsdp_state.py ADDED
@@ -0,0 +1,383 @@
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import functools
4
+ import logging
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ Dict,
9
+ List,
10
+ Optional,
11
+ Sequence,
12
+ Set,
13
+ Tuple,
14
+ TYPE_CHECKING,
15
+ )
16
+
17
+ import torch
18
+ import torch._dynamo.compiled_autograd as ca
19
+ import torch.nn as nn
20
+ from torch._logging import warning_once
21
+ from torch.autograd import Variable
22
+ from torch.autograd.graph import _MultiHandle
23
+ from torch.distributed._composable_state import (
24
+ _get_module_state,
25
+ _insert_module_state,
26
+ _State,
27
+ )
28
+ from torch.distributed.utils import _to_kwargs
29
+ from torch.utils._pytree import tree_flatten, tree_map
30
+
31
+ from ._fsdp_api import MixedPrecisionPolicy
32
+ from ._fsdp_common import _cast_fp_tensor, TrainingState
33
+ from ._fsdp_param_group import FSDPCommContext, FSDPParamGroup
34
+
35
+
36
+ if TYPE_CHECKING:
37
+ from ._fsdp_param import FSDPParam
38
+
39
+
40
+ logger = logging.getLogger("torch.distributed._composable.fsdp")
41
+
42
+
43
+ class FSDPStateContext:
44
+ """This has state shared across FSDP states."""
45
+
46
+ def __init__(self) -> None:
47
+ # All FSDP states in the root state's module tree
48
+ self.all_states: List[FSDPState] = []
49
+ # Iteration's forward root runs the once-per-forward logic; this root
50
+ # may not be the overall root set by lazy initialization in cases where
51
+ # only a submodule runs forward (e.g. encoder-only for eval)
52
+ self.iter_forward_root: Optional[FSDPState] = None
53
+ # Final callback should only be queued once per backward
54
+ self.post_backward_final_callback_queued: bool = False
55
+ # Whether to finalize backward in this backward's final callback
56
+ self.is_last_backward: bool = True
57
+ # Optional user-provided event recorded after optimizer for the
58
+ # all-gather streams to wait on in the root pre-forward
59
+ self.post_optim_event: Optional[torch.cuda.Event] = None
60
+
61
+
62
+ def disable_if_config_true(func):
63
+ @functools.wraps(func)
64
+ def fsdp_hook_wrapper(*args, **kwargs):
65
+ if torch._dynamo.config.skip_fsdp_hooks:
66
+ return torch._dynamo.disable(func, recursive=True)(*args, **kwargs)
67
+ else:
68
+ return func(*args, **kwargs)
69
+
70
+ return fsdp_hook_wrapper
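# A minimal standalone sketch of the config-gated Dynamo escape hatch that the
# wrapper above implements (my_hook and call_hook are illustrative stand-ins;
# the flag is the same one consulted above).
import torch

def my_hook(module, args):
    # runtime bookkeeping that we may not want traced into a compiled graph
    return args

def call_hook(module, args):
    if torch._dynamo.config.skip_fsdp_hooks:
        # graph-break around the hook so torch.compile treats it as opaque
        return torch._dynamo.disable(my_hook, recursive=True)(module, args)
    return my_hook(module, args)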
71
+
72
+
73
+ class FSDPState(_State):
74
+ def __init__(self) -> None:
75
+ super().__init__()
76
+ self._fsdp_param_group: Optional[FSDPParamGroup] = None
77
+ self._is_root: Optional[bool] = None # root set during lazy init
78
+ self._state_ctx = FSDPStateContext()
79
+ self._comm_ctx = FSDPCommContext()
80
+ self._training_state: TrainingState = TrainingState.IDLE
81
+ self._states_to_forward_prefetch: List[FSDPState] = []
82
+ self._states_to_backward_prefetch: List[FSDPState] = []
83
+ self._modules_to_run_forward: Set[nn.Module] = set()
84
+
85
+ # Define a separate init since `__init__` is called in the contract
86
+ def init(
87
+ self,
88
+ modules: Tuple[nn.Module, ...],
89
+ device: torch.device,
90
+ mp_policy: MixedPrecisionPolicy,
91
+ ) -> None:
92
+ for module in modules:
93
+ _insert_module_state(module, self)
94
+ self._modules = modules
95
+ self._device = device
96
+ self._mp_policy = mp_policy
97
+ if len(modules) == 1:
98
+ self._pre_forward_hook_handle = modules[0].register_forward_pre_hook(
99
+ self._pre_forward, prepend=True, with_kwargs=True
100
+ )
101
+ self._post_forward_hook_handle = modules[0].register_forward_hook(
102
+ self._post_forward, prepend=False
103
+ )
104
+ else:
105
+ hook_handle = _register_group_forward_hooks(
106
+ modules,
107
+ self._pre_forward,
108
+ self._post_forward,
109
+ self._modules_to_run_forward,
110
+ )
111
+ self._pre_forward_hook_handle = hook_handle
112
+ self._post_forward_hook_handle = hook_handle
113
+
114
+ def _root_pre_forward(
115
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
116
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
117
+ self._lazy_init()
118
+ if self._state_ctx.iter_forward_root is not None:
119
+ return args, kwargs
120
+ if not ca.compiled_autograd_enabled:
121
+ logger.debug("FSDP::root_pre_forward")
122
+ self._state_ctx.iter_forward_root = self
123
+ with torch.profiler.record_function("FSDP::root_pre_forward"):
124
+ # Wait for optimizer before implicitly prefetched all-gathers
125
+ if (event := self._state_ctx.post_optim_event) is not None:
126
+ self._comm_ctx.all_gather_copy_in_stream.wait_event(event)
127
+ self._comm_ctx.all_gather_stream.wait_event(event)
128
+ self._state_ctx.post_optim_event = None
129
+ else:
130
+ current_stream = torch.cuda.current_stream()
131
+ self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream)
132
+ self._comm_ctx.all_gather_stream.wait_stream(current_stream)
133
+ if self._device.type == "cuda":
134
+ with torch.profiler.record_function("FSDP::inputs_to_device"):
135
+ args_tuple, kwargs_tuple = _to_kwargs(
136
+ args, kwargs, self._device, False
137
+ ) # same as DDP
138
+ args, kwargs = args_tuple[0], kwargs_tuple[0]
139
+ return args, kwargs
140
+
141
+ def _lazy_init(self) -> None:
142
+ """
143
+ Lazy initialization represents when all modules' parallelisms have
144
+ finalized (e.g. FSDP has been applied to all desired modules). This
145
+ means that we can determine which state is the root, and we do so by
146
+ the 1st state to run forward.
147
+ """
148
+ if self._is_root is not None:
149
+ return # no-op: already initialized
150
+ self._is_root = True
151
+ if len(self._modules) > 1:
152
+ raise RuntimeError(
153
+ f"FSDP requires a single root module but got {self._modules}"
154
+ )
155
+ root_module = self._modules[0]
156
+ visited_states: Set[FSDPState] = set()
157
+ for module_name, module in root_module.named_modules():
158
+ if (state := _get_module_fsdp_state(module)) is None:
159
+ continue
160
+ if module is not root_module:
161
+ if state not in visited_states and state._is_root is not None:
162
+ raise RuntimeError(
163
+ "FSDP state has already been lazily initialized for "
164
+ f"{module_name}\nFSDP requires running forward through "
165
+ "the root module first"
166
+ )
167
+ state._is_root = False
168
+ self._state_ctx.all_states.append(state)
169
+ visited_states.add(state)
170
+ if self._fsdp_param_group:
171
+ # For the root, do not reshard after forward since for training,
172
+ # the parameters would be freed and all-gathered immediately
173
+ self._fsdp_param_group.post_forward_mesh_info = None
174
+ self._init_fqns()
175
+ self._init_shared_state()
176
+ # Run parameter group lazy inits after initializing FQNs for improved
177
+ # error messages
178
+ for state in self._state_ctx.all_states:
179
+ if state._fsdp_param_group:
180
+ state._fsdp_param_group.lazy_init()
181
+
182
+ def _init_shared_state(self) -> None:
183
+ self._comm_ctx.lazy_init()
184
+ for state in self._state_ctx.all_states:
185
+ state._state_ctx = self._state_ctx
186
+ state._comm_ctx = self._comm_ctx
187
+ if fsdp_param_group := state._fsdp_param_group:
188
+ fsdp_param_group.comm_ctx = self._comm_ctx
189
+
190
+ def _init_fqns(self) -> None:
191
+ """Sets module and parameter FQN attributes for debugging."""
192
+ assert self._is_root
193
+ root_module = self._modules[0]
194
+ param_to_fsdp_param: Dict[nn.Parameter, FSDPParam] = {}
195
+ module_to_fsdp_param_group: Dict[nn.Module, FSDPParamGroup] = {}
196
+ for state in self._state_ctx.all_states:
197
+ if fsdp_param_group := state._fsdp_param_group:
198
+ for fsdp_param in fsdp_param_group.fsdp_params:
199
+ param_to_fsdp_param[fsdp_param.sharded_param] = fsdp_param
200
+ for module in fsdp_param_group.modules:
201
+ module_to_fsdp_param_group[module] = fsdp_param_group
202
+ for param_name, param in root_module.named_parameters():
203
+ if param in param_to_fsdp_param:
204
+ param_to_fsdp_param[param]._param_fqn = param_name
205
+ for module_name, module in root_module.named_modules():
206
+ if module in module_to_fsdp_param_group:
207
+ module_fqn = module_to_fsdp_param_group[module]._module_fqn
208
+ if module_fqn is None:
209
+ module_to_fsdp_param_group[module]._module_fqn = module_name
210
+ else:
211
+ assert isinstance(module_fqn, str), f"{module_fqn}"
212
+ module_fqn += f", {module_name}"
213
+ module_to_fsdp_param_group[module]._module_fqn = module_fqn
214
+
215
+ @disable_if_config_true
216
+ def _pre_forward(
217
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
218
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
219
+ # When composing with module-hook-based activation checkpointing, the
220
+ # pre-backward hook is responsible for the unshard
221
+ if self._training_state == TrainingState.PRE_BACKWARD:
222
+ return args, kwargs
223
+ self._training_state = TrainingState.FORWARD
224
+ args, kwargs = self._root_pre_forward(module, args, kwargs)
225
+ if self._mp_policy.cast_forward_inputs and self._mp_policy.param_dtype:
226
+ with torch.profiler.record_function("FSDP::cast_forward_inputs"):
227
+ cast_fn = functools.partial(
228
+ _cast_fp_tensor, self._mp_policy.param_dtype
229
+ )
230
+ args, kwargs = tree_map(cast_fn, args), tree_map(cast_fn, kwargs)
231
+ if self._fsdp_param_group:
232
+ args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs)
233
+ for fsdp_state in self._states_to_forward_prefetch:
234
+ if (target_param_group := fsdp_state._fsdp_param_group) is not None:
235
+ FSDPParamGroup._prefetch_unshard(target_param_group, "forward")
236
+ return args, kwargs
237
+
238
+ @disable_if_config_true
239
+ def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any:
240
+ # When composing with module-hook-based activation checkpointing, the
241
+ # post-backward hook is responsible for the reshard
242
+ if self._training_state == TrainingState.PRE_BACKWARD:
243
+ return output
244
+ if self._fsdp_param_group:
245
+ output = self._fsdp_param_group.post_forward(module, input, output)
246
+ output = self._register_pre_backward_hook(output)
247
+ self._training_state = TrainingState.IDLE
248
+ if self._state_ctx.iter_forward_root is self:
249
+ if all_gather_state := self._comm_ctx.all_gather_state:
250
+ # Free the last all-gather result if needed; refer to
251
+ # [Note: Overlapping all-gather copy-in and all-gather]
252
+ self._comm_ctx.all_gather_copy_in_stream.wait_event(
253
+ all_gather_state.event
254
+ )
255
+ self._comm_ctx.all_gather_stream.wait_event(all_gather_state.event)
256
+ self._comm_ctx.all_gather_state = None # free the all-gather result
257
+ self._state_ctx.iter_forward_root = None
258
+ if self._mp_policy.output_dtype is not None:
259
+ with torch.profiler.record_function("FSDP::cast_forward_outputs"):
260
+ output = tree_map(
261
+ functools.partial(_cast_fp_tensor, self._mp_policy.output_dtype),
262
+ output,
263
+ )
264
+ return output
265
+
266
+ def _pre_backward(self, grad: torch.Tensor) -> torch.Tensor:
267
+ self._training_state = TrainingState.PRE_BACKWARD
268
+ self._register_root_post_backward_final_callback()
269
+ if self._fsdp_param_group:
270
+ default_prefetch = len(self._states_to_backward_prefetch) == 0
271
+ self._fsdp_param_group.pre_backward(default_prefetch)
272
+ for fsdp_state in self._states_to_backward_prefetch:
273
+ if (target_param_group := fsdp_state._fsdp_param_group) is not None:
274
+ FSDPParamGroup._prefetch_unshard(target_param_group, "backward")
275
+ return grad
276
+
277
+ def _root_post_backward_final_callback(self) -> None:
278
+ if not ca.compiled_autograd_enabled:
279
+ logger.debug("FSDP::root_post_backward")
280
+ with torch.profiler.record_function("FSDP::root_post_backward_callback"):
281
+ for state in self._state_ctx.all_states:
282
+ if state._fsdp_param_group and state._fsdp_param_group.is_unsharded:
283
+ # Run post-backward in case forward inputs did not require
284
+ # gradient so the autograd backward did not run
285
+ state._fsdp_param_group.post_backward()
286
+ state._training_state = TrainingState.IDLE
287
+ if state._fsdp_param_group:
288
+ state._fsdp_param_group._training_state = TrainingState.IDLE
289
+ if self._state_ctx.is_last_backward:
290
+ state._finalize_backward()
291
+ if self._state_ctx.is_last_backward:
292
+ self._comm_ctx.post_forward_order.clear()
293
+ if self._comm_ctx.reduce_scatter_state is not None:
294
+ torch.cuda.current_stream().wait_event(
295
+ self._comm_ctx.reduce_scatter_state.event
296
+ )
297
+ self._comm_ctx.reduce_scatter_state = None
298
+ self._state_ctx.post_backward_final_callback_queued = False
299
+
300
+ def _finalize_backward(self) -> None:
301
+ if self._modules_to_run_forward:
302
+ msg = (
303
+ f"{len(self._modules_to_run_forward)} of the {len(self._modules)} "
304
+ f"modules passed to fully_shard did not run forward before backward, "
305
+ "which is error-prone since FSDP post-forward/pre-backward logic "
306
+ "will not run for these modules. We recommend passing only modules "
307
+ "that run forward together. Modules that did not run forward: "
308
+ f"{list(self._modules_to_run_forward)}"
309
+ )
310
+ warning_once(logger, msg, stacklevel=2)
311
+ # Clear since we want the next forward to run
312
+ self._modules_to_run_forward.clear()
313
+ if self._fsdp_param_group:
314
+ self._fsdp_param_group.finalize_backward()
315
+
316
+ def _register_pre_backward_hook(self, output: Any) -> Any:
317
+ if not torch.is_grad_enabled():
318
+ return output
319
+ flat_outputs, _ = tree_flatten(output)
320
+ for t in flat_outputs:
321
+ if torch.is_tensor(t) and t.requires_grad:
322
+ t.register_hook(self._pre_backward)
323
+ return output
324
+
325
+ def _register_root_post_backward_final_callback(self):
326
+ if self._state_ctx.post_backward_final_callback_queued:
327
+ return
328
+ self._state_ctx.post_backward_final_callback_queued = True
329
+ Variable._execution_engine.queue_callback(
330
+ self._root_post_backward_final_callback
331
+ )
332
+
333
+
334
+ def _get_module_fsdp_state(module: nn.Module) -> Optional[FSDPState]:
335
+ state = _get_module_state(module)
336
+ if isinstance(state, FSDPState):
337
+ return state
338
+ return None
339
+
340
+
341
+ def _register_group_forward_hooks(
342
+ modules: Sequence[nn.Module],
343
+ pre_hook: Callable,
344
+ post_hook: Callable,
345
+ modules_to_run: Set[nn.Module],
346
+ ):
347
+ """
348
+ Registers group forward pre and post-hooks. The pre-hook runs upon the
349
+ first module pre-forward, and the post-hook runs upon the last. If at least
350
+ one module does not run forward, then the post-hook does not run.
351
+ """
352
+ modules_set = set(modules)
353
+
354
+ @disable_if_config_true
355
+ @functools.wraps(pre_hook)
356
+ def wrapped_pre_hook(*args: Any, **kwargs: Any):
357
+ if len(modules_to_run) == 0: # first to run
358
+ modules_to_run.update(modules_set)
359
+ return pre_hook(*args, **kwargs)
360
+
361
+ @disable_if_config_true
362
+ def get_wrapped_post_hook(module: nn.Module):
363
+ @functools.wraps(post_hook)
364
+ def wrapped_post_hook(*args: Any, **kwargs: Any):
365
+ modules_to_run.discard(module)
366
+ if len(modules_to_run) == 0:
367
+ return post_hook(*args, **kwargs)
368
+
369
+ return wrapped_post_hook
370
+
371
+ pre_handles = [
372
+ module.register_forward_pre_hook(
373
+ wrapped_pre_hook, prepend=True, with_kwargs=True
374
+ )
375
+ for module in modules
376
+ ]
377
+ post_handles = [
378
+ module.register_forward_hook(
379
+ get_wrapped_post_hook(module), prepend=False, always_call=True
380
+ )
381
+ for module in modules
382
+ ]
383
+ return _MultiHandle(tuple(pre_handles + post_handles))
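# A minimal standalone sketch of the "first pre-forward / last post-forward"
# grouping idea implemented above, using plain module hooks (the two Linear
# modules and the print statements are illustrative only).
import torch
import torch.nn as nn

mods = [nn.Linear(2, 2), nn.Linear(2, 2)]
to_run: set = set()

def pre_hook(module, args):
    if not to_run:  # first module of the group to start forward
        to_run.update(mods)
        print("group pre-forward")

def make_post_hook(module):
    def post_hook(mod, args, output):
        to_run.discard(module)
        if not to_run:  # last module of the group to finish forward
            print("group post-forward")
    return post_hook

for m in mods:
    m.register_forward_pre_hook(pre_hook)
    m.register_forward_hook(make_post_hook(m))

x = torch.randn(1, 2)
mods[0](x)
mods[1](x)  # each group message prints exactly once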
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/fully_shard.py ADDED
@@ -0,0 +1,446 @@
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import functools
4
+ from typing import Any, cast, Dict, Iterable, List, NoReturn, Optional, Type, Union
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from torch.distributed._composable import contract
9
+ from torch.distributed.tensor import DeviceMesh
10
+ from torch.distributed.utils import _get_root_modules
11
+
12
+ from ._fsdp_api import MixedPrecisionPolicy, OffloadPolicy
13
+ from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo
14
+ from ._fsdp_init import (
15
+ _get_device_from_mesh,
16
+ _get_managed_modules,
17
+ _get_managed_states,
18
+ _get_post_forward_mesh_info,
19
+ _init_default_fully_shard_mesh,
20
+ _move_states_to_device,
21
+ )
22
+ from ._fsdp_param_group import FSDPParamGroup
23
+ from ._fsdp_state import _get_module_fsdp_state, FSDPState
24
+
25
+
26
+ cls_to_fsdp_cls: Dict[Type, Type] = {}
27
+
28
+
29
+ # The decorator adds a state object to `module` that can be accessed via
30
+ # `fully_shard.state(module)`. The state object and module are 1:1.
31
+ @contract(state_cls=FSDPState) # type: ignore[operator]
32
+ def fully_shard(
33
+ module: Union[nn.Module, List[nn.Module]],
34
+ *,
35
+ mesh: Optional[DeviceMesh] = None,
36
+ reshard_after_forward: Union[bool, int] = True,
37
+ mp_policy: MixedPrecisionPolicy = MixedPrecisionPolicy(),
38
+ offload_policy: OffloadPolicy = OffloadPolicy(),
39
+ ):
40
+ """
41
+ Shard module parameters across data parallel workers.
42
+
43
+ This function applies fully sharded data parallelism (FSDP) or a variant to
44
+ ``module``, a technique for memory savings at the cost of communication.
45
+ Parameters are sharded across ``mesh``, and in turn, so are their gradients
46
+ and optimizer states.
47
+
48
+ The sharded parameters are all-gathered to construct the unsharded
49
+ parameters for forward or backward computation. The unsharded parameters
50
+ are freed after computation to save memory. The gradients are reduced
51
+ across the mesh and divided by the mesh size for data parallelism. The
52
+ optimizer step runs on the sharded parameters.
53
+
54
+ Each call to ``fully_shard`` constructs one communication group that
55
+ includes the parameters in ``module.parameters()`` except those already
56
+ assigned to a group from a nested call. Each group's parameters and its
57
+ gradients are communicated together in one collective, respectively.
58
+ Constructing multiple groups across the model (e.g. "layer by layer")
59
+ allows for peak memory savings and communication/computation overlap.
60
+
61
+ Implementation-wise, the sharded parameters are represented as
62
+ :class:`DTensor` s, sharded on dim-0, and the unsharded parameters are
63
+ represented as :class:`Tensor` s. A module forward pre-hook all-gathers the
64
+ parameters, and a module forward hook frees them. Similar backward hooks
65
+ gather parameters and later free parameters/reduce gradients.
66
+
67
+ Args:
68
+ module (Union[nn.Module, List[nn.Module]]): The module or modules to
69
+ shard with FSDP and group together for communication.
70
+ mesh (Optional[DeviceMesh]): This data parallel mesh defines the
71
+ sharding and device. If 1D, then parameters are fully sharded
72
+ across the 1D mesh (FSDP). If 2D, then parameters are sharded
73
+ across the 0th dim and replicated across the 1st dim (HSDP). The
74
+ mesh's device type gives the device type used for communication;
75
+ if a CUDA or CUDA-like device type, then we use the current device.
76
+ reshard_after_forward (Union[bool, int]): This controls the parameter
77
+ behavior after forward and can trade off memory and communication:
78
+ - If ``True``, then this reshards parameters after forward and
79
+ all-gathers in backward.
80
+ - If ``False``, then this keeps the unsharded parameters in memory
81
+ after forward and avoids the all-gather in backward.
82
+ - If an ``int``, then this represents the world size to reshard to
83
+ after forward. It should be a non-trivial divisor of the ``mesh``
84
+ shard dim size (i.e. excluding 1 and the dim size itself). A choice
85
+ may be the intra-node size (e.g. ``torch.cuda.device_count()``).
86
+ This allows the all-gather in backward to be over a smaller world
87
+ size at the cost of higher memory usage than setting to ``True``.
88
+ - The root FSDP state has its value specially set to ``False`` as a
89
+ heuristic since its parameters would typically be immediately
90
+ all-gathered for backward.
91
+ - After forward, the parameters registered to the module depend on
92
+ this: The registered parameters are the sharded parameters if
93
+ ``True``; unsharded parameters if ``False``; and the parameters
94
+ resharded to the smaller mesh otherwise. To modify the parameters
95
+ between forward and backward, the registered parameters must be the
96
+ sharded parameters. For ``False`` or an ``int``, this can be done
97
+ by manually resharding via :meth:`reshard`.
98
+ mp_policy (MixedPrecisionPolicy): This controls the mixed precision
99
+ policy, which offers parameter/reduction mixed precision for this
100
+ module. See :class:`MixedPrecisionPolicy` for details.
101
+ offload_policy (OffloadPolicy): This controls the offloading policy,
102
+ which offers parameter/gradient/optimizer state offloading. See
103
+ :class:`OffloadPolicy` and its subclasses for details.
104
+ """
105
+ if isinstance(module, (nn.ModuleList, nn.ModuleDict)):
106
+ raise ValueError(
107
+ f"fully_shard does not support containers that do not implement forward: {module}"
108
+ )
109
+ mesh = mesh or _init_default_fully_shard_mesh()
110
+ if mesh.ndim not in (1, 2):
111
+ raise ValueError(f"fully_shard expects a 1D or 2D DeviceMesh but got {mesh}")
112
+ elif mesh.ndim == 1:
113
+ mesh_info = FSDPMeshInfo(mesh, shard_mesh_dim=0)
114
+ else:
115
+ mesh_info = HSDPMeshInfo(mesh, shard_mesh_dim=1, replicate_mesh_dim=0)
116
+ device = _get_device_from_mesh(mesh)
117
+ post_forward_mesh_info = _get_post_forward_mesh_info(
118
+ reshard_after_forward, mesh_info
119
+ )
120
+
121
+ arg_module = module
122
+ modules = (
123
+ (module,) if isinstance(module, nn.Module) else tuple(_get_root_modules(module))
124
+ )
125
+ state = fully_shard.state(modules[0])
126
+ state.init(modules, device, mp_policy)
127
+
128
+ managed_modules = _get_managed_modules(modules)
129
+ params, buffers = _get_managed_states(managed_modules)
130
+ _move_states_to_device(params, buffers, device)
131
+ if params:
132
+ state._fsdp_param_group = FSDPParamGroup(
133
+ params,
134
+ modules,
135
+ mesh_info,
136
+ post_forward_mesh_info,
137
+ device,
138
+ mp_policy,
139
+ offload_policy,
140
+ )
141
+
142
+ # For Dynamo
143
+ for managed_module in managed_modules:
144
+ managed_module._is_fsdp_managed_module = True # type: ignore[assignment]
145
+ managed_module._fsdp_use_orig_params = True # type: ignore[assignment]
146
+
147
+ # Place FSDP leftmost for highest priority in the method resolution order
148
+ for module in modules:
149
+ cls = module.__class__
150
+ new_cls = cls_to_fsdp_cls.get(cls, None)
151
+ if not new_cls:
152
+ dct = {"__deepcopy__": unimplemented_deepcopy}
153
+ new_cls = type(f"FSDP{cls.__name__}", (FSDPModule, cls), dct)
154
+ cls_to_fsdp_cls[cls] = new_cls
155
+ module.__class__ = new_cls
156
+ return arg_module
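# A minimal usage sketch for the API above. It assumes torch.distributed is
# already initialized with a CUDA backend and one GPU per rank, that the
# package __init__ re-exports fully_shard and MixedPrecisionPolicy, and that
# the model and dimensions below are placeholders.
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed._composable.fsdp import fully_shard, MixedPrecisionPolicy

model = nn.Sequential(*[nn.Linear(1024, 1024) for _ in range(4)])
mesh = init_device_mesh("cuda", (dist.get_world_size(),))
mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)

# Shard "layer by layer" first so each child forms its own communication
# group, then shard the root to pick up any remaining parameters.
for layer in model:
    fully_shard(layer, mesh=mesh, mp_policy=mp)
fully_shard(model, mesh=mesh, mp_policy=mp)

optim = torch.optim.AdamW(model.parameters(), lr=1e-3)
loss = model(torch.randn(8, 1024, device="cuda")).sum()
loss.backward()
optim.step()
optim.zero_grad()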
157
+
158
+
159
+ def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> NoReturn:
160
+ raise AssertionError(
161
+ "FSDP does not support deepcopy. Please use state dict for serialization."
162
+ )
163
+
164
+
165
+ class FSDPModule:
166
+ def __new__(cls, *args, **kwargs):
167
+ """
168
+ Override ``__new__`` to remove the FSDP class and directly construct
169
+ the original class for cases like indexing into a container module.
170
+ """
171
+ # Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
172
+ # and index 1 is the `FSDPModule` class itself
173
+ orig_cls = cls.__mro__[2]
174
+ self = orig_cls.__new__(orig_cls, *args, **kwargs)
175
+ self.__init__(*args, **kwargs)
176
+ return self
177
+
178
+ def reshard(self) -> None:
179
+ """
180
+ Reshards the module's parameters, registering the sharded parameters
181
+ to the module and freeing the unsharded parameters if needed. This
182
+ method is *not* recursive.
183
+ """
184
+ state = self._get_fsdp_state()
185
+ if fsdp_param_group := state._fsdp_param_group:
186
+ fsdp_param_group.reshard()
187
+
188
+ def unshard(self, async_op: bool = False) -> Optional["UnshardHandle"]:
189
+ """
190
+ Unshards the module's parameters by allocating memory and all-gathering
191
+ the parameters. This method is *not* recursive.
192
+
193
+ Args:
194
+ async_op (bool): If ``True``, then returns a :class:`UnshardHandle`
195
+ that has a :meth:`wait` method to wait on the unshard op. If
196
+ ``False``, then returns ``None`` and waits on the handle inside
197
+ this function.
198
+
199
+ .. warning:: This method is experimental and subject to change.
200
+
201
+ .. note:: If ``async_op=True``, then the user does not have to call
202
+ :meth:`wait` on the returned handle if waiting on the unshard op
203
+ in the module's pre-forward is tolerable. FSDP will wait on the
204
+ pending unshard op in the pre-forward automatically.
205
+ """
206
+ state = self._get_fsdp_state()
207
+ fsdp_param_group = state._fsdp_param_group
208
+ if fsdp_param_group is not None:
209
+ fsdp_param_group.lazy_init()
210
+ fsdp_param_group.unshard(async_op=async_op)
211
+ handle = UnshardHandle(fsdp_param_group)
212
+ if async_op:
213
+ return handle
214
+ handle.wait()
215
+ return None
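# A minimal sketch of overlapping an explicit unshard with other work; `model`
# is assumed to be an FSDPModule prepared as in the fully_shard sketch above,
# and the arguments are placeholders.
def run_with_explicit_unshard(model, batch, overlap_work):
    handle = model.unshard(async_op=True)  # issue the all-gather without blocking
    overlap_work()                         # unrelated work overlaps the all-gather
    if handle is not None:
        handle.wait()                      # unsharded parameters are now registered
    return model(batch)                    # pre-forward would also wait if we skipped wait()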
216
+
217
+ def set_is_last_backward(self, is_last_backward: bool) -> None:
218
+ """
219
+ Sets whether the next backward is the last one, meaning that FSDP
220
+ should wait for gradient reduction to finish and clear internal data
221
+ structures used for explicit prefetching.
222
+ """
223
+ state = self._get_fsdp_state()
224
+ state._state_ctx.is_last_backward = is_last_backward
225
+
226
+ def set_requires_gradient_sync(
227
+ self, requires_gradient_sync: bool, *, recurse: bool = True
228
+ ) -> None:
229
+ """
230
+ Sets if the module should sync gradients. This can be used to implement
231
+ gradient accumulation without communication. For HSDP, this controls
232
+ both reduce-scatter and all-reduce together.
233
+
234
+ Args:
235
+ requires_gradient_sync (bool): Whether to reduce gradients for the
236
+ module's parameters.
237
+ recurse (bool): Whether to set for all submodules or just the
238
+ passed-in module.
239
+ """
240
+ self_module = cast(nn.Module, self)
241
+ modules = list(self_module.modules()) if recurse else [self_module]
242
+ for module in modules:
243
+ if isinstance(module, FSDPModule):
244
+ state = module._get_fsdp_state()
245
+ if fsdp_param_group := state._fsdp_param_group:
246
+ fsdp_param_group.reduce_grads = requires_gradient_sync
247
+ fsdp_param_group.all_reduce_grads = requires_gradient_sync
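# A minimal gradient-accumulation sketch using the toggles above; `model` is
# assumed to be the root FSDPModule, and the optimizer and microbatch loop are
# placeholders.
def accumulate_and_step(model, optim, microbatches):
    for i, batch in enumerate(microbatches):
        is_last = i == len(microbatches) - 1
        model.set_requires_gradient_sync(is_last)  # reduce only on the last microbatch
        model.set_is_last_backward(is_last)        # defer cleanup until the last backward
        model(batch).sum().backward()
    optim.step()
    optim.zero_grad()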
248
+
249
+ def set_requires_all_reduce(
250
+ self, requires_all_reduce: bool, *, recurse: bool = True
251
+ ) -> None:
252
+ """
253
+ Sets if the module should all-reduce gradients. This can be used to
254
+ implement gradient accumulation with only reduce-scatter but not
255
+ all-reduce for HSDP.
256
+ """
257
+ self_module = cast(nn.Module, self)
258
+ modules = list(self_module.modules()) if recurse else [self_module]
259
+ for module in modules:
260
+ if isinstance(module, FSDPModule):
261
+ state = module._get_fsdp_state()
262
+ if fsdp_param_group := state._fsdp_param_group:
263
+ fsdp_param_group.all_reduce_grads = requires_all_reduce
264
+
265
+ def set_reshard_after_backward(
266
+ self, reshard_after_backward: bool, *, recurse: bool = True
267
+ ) -> None:
268
+ """
269
+ Sets if the module should reshard parameters after backward. This can
270
+ be used during gradient accumulation to trade off higher memory for
271
+ reduced communication.
272
+
273
+ Args:
274
+ reshard_after_backward (bool): Whether to reshard parameters after
275
+ backward.
276
+ recurse (bool): Whether to set for all submodules or just the
277
+ passed-in module.
278
+ """
279
+ self_module = cast(nn.Module, self)
280
+ modules = list(self_module.modules()) if recurse else [self_module]
281
+ for module in modules:
282
+ if isinstance(module, FSDPModule):
283
+ state = module._get_fsdp_state()
284
+ if fsdp_param_group := state._fsdp_param_group:
285
+ fsdp_param_group.reshard_after_backward = reshard_after_backward
286
+
287
+ def set_modules_to_forward_prefetch(self, modules: List["FSDPModule"]) -> None:
288
+ """
289
+ Sets the FSDP modules for which this FSDP module should explicitly
290
+ prefetch all-gathers in forward. The prefetching runs after this
291
+ module's all-gather copy-out.
292
+
293
+ Passing a singleton list containing the next FSDP module gives the same
294
+ all-gather overlap behavior as the default overlap behavior, except the
295
+ prefetched all-gather is issued earlier from the CPU. Passing a list
296
+ with at least length two is required for more aggressive overlap and
297
+ will use more reserved memory.
298
+
299
+ Args:
300
+ modules (List[FSDPModule]): FSDP modules to prefetch.
301
+ """
302
+ _assert_all_fsdp_modules(modules)
303
+ self._get_fsdp_state()._states_to_forward_prefetch = [
304
+ module._get_fsdp_state() for module in modules
305
+ ]
306
+
307
+ def set_modules_to_backward_prefetch(self, modules: List["FSDPModule"]) -> None:
308
+ """
309
+ Sets the FSDP modules for which this FSDP module should explicitly
310
+ prefetch all-gathers in backward. This overrides the default backward
311
+ pretching implementation that prefetches the next FSDP module based on
312
+ the reverse post-forward order.
313
+
314
+ Passing a singleton list containing the previous FSDP module gives the
315
+ same all-gather overlap behavior as the default overlap behavior.
316
+ Passing a list with at least length two is required for more aggressive
317
+ overlap and will use more reserved memory.
318
+
319
+ Args:
320
+ modules (List[FSDPModule]): FSDP modules to prefetch.
321
+ """
322
+ _assert_all_fsdp_modules(modules)
323
+ self._get_fsdp_state()._states_to_backward_prefetch = [
324
+ module._get_fsdp_state() for module in modules
325
+ ]
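# A minimal sketch of explicit prefetching across a list of transformer blocks,
# each assumed to be wrapped with fully_shard (so each is an FSDPModule). The
# depth of 2 is an illustrative choice that trades extra memory for more overlap.
def set_prefetch_schedule(blocks, depth=2):
    for i, block in enumerate(blocks):
        block.set_modules_to_forward_prefetch(blocks[i + 1 : i + 1 + depth])
        block.set_modules_to_backward_prefetch(
            list(reversed(blocks[max(0, i - depth) : i]))
        )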
326
+
327
+ def set_post_optim_event(self, event: torch.cuda.Event) -> None:
328
+ """
329
+ Sets a post-optimizer-step event for the root FSDP module to wait the
330
+ all-gather streams on.
331
+
332
+ By default, the root FSDP module waits the all-gather streams on the
333
+ current stream to ensure that the optimizer step has finished before
334
+ all-gathering. However, this may introduce false dependencies if
335
+ there is unrelated computation after the optimizer step. This API
336
+ allows the user to provide their own event to wait on. After the root
337
+ waits on the event, the event is discarded, so this API should be
338
+ called with a new event each iteration.
339
+
340
+ Args:
341
+ event (torch.cuda.Event): Event recorded after the optimizer step
342
+ to wait all-gather streams on.
343
+ """
344
+ self._get_fsdp_state()._state_ctx.post_optim_event = event
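# A minimal sketch for the event-based synchronization above; `model` is
# assumed to be the root FSDPModule on a CUDA setup, and the optimizer, batch,
# and trailing work are placeholders.
import torch

def train_step(model, optim, batch, run_unrelated_work):
    model(batch).sum().backward()
    optim.step()
    optim.zero_grad()
    # Record right after the optimizer step so only the step itself gates the
    # next iteration's all-gathers, not whatever runs on this stream afterwards.
    model.set_post_optim_event(torch.cuda.current_stream().record_event())
    run_unrelated_work()  # e.g. metrics or EMA kernels on the current stream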
345
+
346
+ def set_reduce_scatter_divide_factor(self, factor: float) -> None:
347
+ """
348
+ Sets a custom divide factor for the reduce-scatter. This becomes a
349
+ custom reduce op using NCCL's PreMulSum, which allows multiplying by
350
+ the factor before reduction.
351
+
352
+ Args:
353
+ factor (float): Custom divide factor.
354
+ """
355
+ state = self._get_fsdp_state()
356
+ if (fsdp_param_group := state._fsdp_param_group) is not None:
357
+ mul_factor = 1.0 / float(factor)
358
+ reduce_op = torch.distributed._make_nccl_premul_sum(mul_factor)
359
+ fsdp_param_group.reduce_scatter_reduce_op = reduce_op
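# A minimal sketch (assumes `model` is the root FSDPModule and the process
# group is initialized): per the docstring above, gradients are multiplied by
# 1/factor via NCCL PreMulSum during the reduce-scatter.
import torch.distributed as dist

model.set_reduce_scatter_divide_factor(float(dist.get_world_size()))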
360
+
361
+ def _get_fsdp_state(self) -> FSDPState:
362
+ if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None:
363
+ raise AssertionError(f"No FSDP state found on {self}")
364
+ return state
365
+
366
+ def _apply(self, *args: Any, **kwargs: Any) -> Any:
367
+ # Reshard to ensure that sharded parameters are registered
368
+ self.reshard()
369
+ ret = super()._apply(*args, **kwargs) # type: ignore[misc]
370
+ state = self._get_fsdp_state()
371
+ if not (fsdp_param_group := state._fsdp_param_group):
372
+ return ret
373
+ # TODO: Remove this padding logic once DTensor pads the local tensor:
374
+ # https://github.com/pytorch/pytorch/issues/113045
375
+ with torch.no_grad():
376
+ for fsdp_param in fsdp_param_group.fsdp_params:
377
+ fsdp_param.reset_sharded_param()
378
+ return ret
379
+
380
+
381
+ class UnshardHandle:
382
+ """
383
+ A handle to wait on the unshard op.
384
+
385
+ Args:
386
+ fsdp_param_group (FSDPParamGroup, optional): FSDP parameter group to
387
+ unshard. This should be ``None`` iff the FSDP module does not
388
+ manage any parameters, meaning the unshard is a no-op.
389
+ """
390
+
391
+ def __init__(self, fsdp_param_group: Optional[FSDPParamGroup]):
392
+ self._fsdp_param_group = fsdp_param_group
393
+
394
+ def wait(self):
395
+ """
396
+ Waits on the unshard op.
397
+
398
+ This ensures that the current stream can use the unsharded parameters,
399
+ which are now registered to the module.
400
+ """
401
+ if self._fsdp_param_group is not None:
402
+ self._fsdp_param_group.wait_for_unshard()
403
+ # Avoid keeping a reference
404
+ self._fsdp_param_group = None
405
+
406
+
407
+ def register_fsdp_forward_method(module: nn.Module, method_name: str) -> None:
408
+ """
409
+ Registers a method on ``module`` to be a forward method for FSDP.
410
+
411
+ FSDP only knows to run its pre-forward and post-forward hooks on the
412
+ default :meth:`nn.Module.forward` method. This function patches a user
413
+ specified method to run the pre/post-forward hooks before/after the method,
414
+ respectively. If ``module`` is not an :class:`FSDPModule`, then this is a
415
+ no-op.
416
+
417
+ Args:
418
+ module (nn.Module): Module to register the forward method on.
419
+ method_name (str): Name of the forward method.
420
+ """
421
+ if not isinstance(module, FSDPModule):
422
+ # Make this a no-op so the call site works both with and without FSDP
423
+ return
424
+ if not hasattr(module, method_name):
425
+ raise ValueError(f"{type(module)} does not have a method {method_name}")
426
+ orig_method = getattr(module, method_name)
427
+
428
+ @functools.wraps(orig_method)
429
+ def wrapped_method(self, *args, **kwargs):
430
+ fsdp_state = self._get_fsdp_state()
431
+ args, kwargs = fsdp_state._pre_forward(self, args, kwargs)
432
+ out = orig_method(*args, **kwargs)
433
+ return fsdp_state._post_forward(self, args, out)
434
+
435
+ # Use `__get__` to make `wrapped_method` an instance method
436
+ setattr(
437
+ module,
438
+ method_name,
439
+ wrapped_method.__get__(module, type(module)), # type:ignore[attr-defined]
440
+ )
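# A minimal sketch of why this registration is needed: methods other than
# forward() bypass module hooks, so FSDP would not all-gather before them.
# It assumes torch.distributed is initialized and that the package __init__
# re-exports these symbols; the Encoder class and `encode` name are
# illustrative.
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard, register_fsdp_forward_method

class Encoder(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.proj = nn.Linear(16, 16)

    def forward(self, x):
        return self.proj(x)

    def encode(self, x):  # custom entry point used outside forward()
        return self.proj(x).relu()

enc = Encoder()
fully_shard(enc)
register_fsdp_forward_method(enc, "encode")  # pre/post-forward hooks now wrap encode()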
441
+
442
+
443
+ def _assert_all_fsdp_modules(modules: Iterable[Any]) -> None:
444
+ for module in modules:
445
+ if not isinstance(module, FSDPModule):
446
+ raise ValueError(f"Expects FSDPModule but got {type(module)}: {module}")
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/fully_shard.py ADDED
@@ -0,0 +1,131 @@
1
+ # mypy: allow-untyped-decorators
2
+ from typing import Callable, Iterable, Optional, Union
3
+ from typing_extensions import deprecated
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.nn as nn
8
+ from torch.distributed._composable.contract import contract
9
+ from torch.distributed._composable_state import _get_module_state, _insert_module_state
10
+ from torch.distributed.fsdp._common_utils import _FSDPState
11
+ from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
12
+ from torch.distributed.fsdp._init_utils import (
13
+ _init_buffer_state,
14
+ _init_core_state,
15
+ _init_device_handle,
16
+ _init_ignored_module_states,
17
+ _init_param_handle_from_module,
18
+ _init_prefetching_state,
19
+ _init_process_group_state,
20
+ _init_runtime_state,
21
+ _init_state_dict_state,
22
+ HYBRID_SHARDING_STRATEGIES,
23
+ )
24
+ from torch.distributed.fsdp._runtime_utils import (
25
+ _register_post_forward_hook,
26
+ _register_pre_forward_hook,
27
+ _register_root_pre_forward_hook,
28
+ )
29
+ from torch.distributed.fsdp._state_dict_utils import _register_all_state_dict_hooks
30
+ from torch.distributed.fsdp._wrap_utils import _auto_wrap
31
+ from torch.distributed.fsdp.api import (
32
+ BackwardPrefetch,
33
+ CPUOffload,
34
+ MixedPrecision,
35
+ ShardingStrategy,
36
+ )
37
+ from torch.distributed.fsdp.wrap import _Policy
38
+
39
+
40
+ @contract(state_cls=_FSDPState)
41
+ @deprecated(
42
+ "`torch.distributed._composable.fully_shard` is being deprecated. "
43
+ "You can continue to use the wrapper based FSDP. "
44
+ "See usage in: https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/fully_sharded_data_parallel.py. "
45
+ "`torch.distributed._composable.fully_shard` will be removed after PyTorch 2.5.",
46
+ category=FutureWarning,
47
+ )
48
+ def fully_shard(
49
+ module: nn.Module,
50
+ *,
51
+ process_group: Optional[dist.ProcessGroup] = None,
52
+ policy: Optional[_Policy] = None,
53
+ strategy: Optional[ShardingStrategy] = None,
54
+ mixed_precision: Optional[MixedPrecision] = None,
55
+ cpu_offload: Optional[CPUOffload] = None,
56
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
57
+ device_id: Optional[Union[int, torch.device]] = None,
58
+ param_init_fn: Optional[Callable[[nn.Module], None]] = None,
59
+ sync_module_states: bool = False,
60
+ forward_prefetch: bool = False,
61
+ ignored_states: Union[
62
+ Optional[Iterable[torch.nn.Parameter]], Optional[Iterable[torch.nn.Module]]
63
+ ] = None,
64
+ ) -> nn.Module:
65
+ """Applies ``FullyShardedDataParallel`` (FSDP) semantics to ``module``."""
66
+ torch._C._log_api_usage_once("torch.distributed.fully_shard")
67
+ # Enforce the new auto wrap policy
68
+ if policy is not None and not isinstance(policy, _Policy):
69
+ raise ValueError(f"Expects a `_Policy` but got {policy}")
70
+ state = fully_shard.state(module)
71
+ state = _init_ignored_module_states(state, module, ignored_modules, ignored_states)
72
+ state = _init_device_handle(state, module, state._ignored_params, device_id)
73
+ _annotate_modules_for_dynamo(module, state._ignored_modules, True)
74
+ state = _init_process_group_state(state, process_group, strategy, policy)
75
+ if policy is not None:
76
+ root_kwargs = {
77
+ "process_group": process_group,
78
+ "strategy": strategy,
79
+ "mixed_precision": mixed_precision,
80
+ "cpu_offload": cpu_offload,
81
+ "ignored_modules": ignored_modules,
82
+ "device_id": device_id,
83
+ "param_init_fn": param_init_fn,
84
+ "sync_module_states": sync_module_states,
85
+ "forward_prefetch": forward_prefetch,
86
+ "ignored_states": ignored_states,
87
+ }
88
+ if strategy in HYBRID_SHARDING_STRATEGIES:
89
+ root_kwargs["process_group"] = (state.process_group, state._inter_node_pg)
90
+ _auto_wrap(
91
+ module,
92
+ policy,
93
+ state._ignored_modules,
94
+ state._ignored_params,
95
+ root_kwargs,
96
+ fully_shard,
97
+ )
98
+ state = _init_core_state(
99
+ state,
100
+ strategy or ShardingStrategy.FULL_SHARD,
101
+ mixed_precision,
102
+ cpu_offload,
103
+ limit_all_gathers=True,
104
+ use_orig_params=True,
105
+ backward_prefetch_limit=1,
106
+ forward_prefetch_limit=1,
107
+ )
108
+ state = _init_runtime_state(state)
109
+ state = _init_prefetching_state(
110
+ state, BackwardPrefetch.BACKWARD_PRE, forward_prefetch=forward_prefetch
111
+ )
112
+ state = _init_buffer_state(state, module)
113
+ state = _init_param_handle_from_module(
114
+ state, module, device_id, param_init_fn, sync_module_states
115
+ )
116
+ state = _init_state_dict_state(state)
117
+ _register_all_state_dict_hooks(state)
118
+ _register_pre_forward_hook(state, module)
119
+ _register_post_forward_hook(state, module)
120
+ _register_root_pre_forward_hook(state, module) # prepend last
121
+ # Always insert the state for the passed-in module even if it has no
122
+ # managed parameters, in which case it has no handles and does not appear
123
+ # in `_fully_sharded_module_to_handles`
124
+ _insert_module_state(module, state)
125
+ for submodule in module.modules():
126
+ if (
127
+ submodule in state._fully_sharded_module_to_handle
128
+ and _get_module_state(submodule) is None
129
+ ):
130
+ _insert_module_state(submodule, state)
131
+ return module
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py ADDED
@@ -0,0 +1,256 @@
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import weakref
4
+ from typing import Any, cast, Dict, Iterable, List, NoReturn, Optional, Set, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from torch.distributed._composable_state import _State
9
+ from torch.nn.parallel import DistributedDataParallel
10
+
11
+ from .contract import _get_registry, contract
12
+
13
+
14
+ _ROOT_MODULE_PREFIX = ""
15
+
16
+
17
+ class _ReplicateState(_State):
18
+ def __init__(self) -> None:
19
+ super().__init__()
20
+ self.module: nn.Module = nn.ParameterList()
21
+ self.has_initialized: bool = False
22
+ self._param_list: nn.ParameterList = nn.ParameterList()
23
+ # TODO(@fegin): this variable was originally created for testing; we
24
+ # should remove this if possible.
25
+ self._orig_module = self.module
26
+ self._param_names: List[str] = []
27
+ self._no_sync: bool = False
28
+ self._init_args: Optional[Tuple[Any, ...]] = None
29
+ self._init_kwargs: Dict[str, Any] = {}
30
+ self._comm_hook_args: List[Any] = []
31
+
32
+ def _collect_params(
33
+ self,
34
+ module: nn.Module,
35
+ ignored_modules: Set[nn.Module],
36
+ ignored_params: Set[nn.Parameter],
37
+ prefix: str = _ROOT_MODULE_PREFIX,
38
+ ) -> None:
39
+ # skip if managed by the fully_shard API
40
+ if _is_fully_sharded(module):
41
+ return
42
+
43
+ # if a module is ignored, all descendants of the module are ignored.
44
+ if module in ignored_modules:
45
+ return
46
+
47
+ recurse_prefix = (
48
+ f"{prefix}." if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX
49
+ )
50
+
51
+ for n, p in module.named_parameters(recurse=False):
52
+ if p not in ignored_params:
53
+ self._param_list.append(p)
54
+ self._param_names.append(f"{recurse_prefix}{n}")
55
+
56
+ for name, child_module in module.named_children():
57
+ self._collect_params(
58
+ child_module,
59
+ ignored_modules,
60
+ ignored_params,
61
+ prefix=f"{recurse_prefix}{name}",
62
+ )
63
+
64
+ def lazy_init(self) -> None:
65
+ @torch._disable_dynamo(recursive=True)
66
+ def _lazy_init():
67
+ assert self._init_args is not None
68
+ self.init(*self._init_args, **self._init_kwargs)
69
+ self.register_comm_hook()
70
+ self._init_args = ()
71
+ self._init_kwargs = {}
72
+
73
+ _lazy_init()
74
+
75
+ def init(
76
+ self,
77
+ module: nn.Module,
78
+ ignored_modules: Set[nn.Module],
79
+ **kwargs,
80
+ ) -> None:
81
+ if self.has_initialized:
82
+ return
83
+
84
+ self.has_initialized = True
85
+
86
+ device_mesh = kwargs.get("device_mesh", None)
87
+ self.module = module
88
+ ignored_params = {p for m in ignored_modules for p in m.parameters()}
89
+ for submodule in module.modules():
90
+ if _is_fully_sharded(submodule):
91
+ ignored_params.update(submodule.parameters())
92
+ from torch.distributed.tensor.parallel.ddp import _localize_dtensor
93
+
94
+ _localize_dtensor(module, ignored_params=ignored_params)
95
+ self._collect_params(module, ignored_modules, ignored_params)
96
+
97
+ if "device_id" in kwargs:
98
+ # replicate() supports a small usability enhancement where
99
+ # the user can pass in device_id as a Union[int, torch.device] even for
100
+ # CPU devices so users don't have to change code for CPU/GPU runs.
101
+ # We derive the right device_ids to feed into DDP to support this.
102
+ if kwargs["device_id"] is not None:
103
+ device_id = kwargs["device_id"]
104
+ # Convert to device_ids that DDP expects.
105
+ if isinstance(device_id, torch.device) and device_id.type == "cpu":
106
+ # CPU modules receive device_ids None
107
+ kwargs["device_ids"] = None
108
+ else:
109
+ # GPU modules expect device_ids=[cuda_device]
110
+ kwargs["device_ids"] = [device_id]
111
+ else:
112
+ kwargs["device_ids"] = None
113
+ kwargs.pop("device_id")
114
+
115
+ self._ddp = DistributedDataParallel(self._param_list, **kwargs)
116
+ # Weakref to the DDP instance is currently only used for testing.
117
+ replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)
118
+
119
+ def register_comm_hook(self) -> None:
120
+ for comm_args, comm_kwargs in self._comm_hook_args:
121
+ self._ddp.register_comm_hook(*comm_args, **comm_kwargs)
122
+ self._comm_hook_args.clear()
123
+
124
+ def record_init_args(self, *args, **kwargs) -> None:
125
+ self._init_args = args
126
+ self._init_kwargs = kwargs
127
+
128
+ def forward_pre_hook(
129
+ self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
130
+ ) -> Any:
131
+ if self._init_args or self._init_kwargs:
132
+ self.lazy_init()
133
+ self._ddp.require_backward_grad_sync = not self._no_sync
134
+ return self._ddp._pre_forward(*args, **kwargs)
135
+
136
+ def forward_post_hook(
137
+ self,
138
+ module: nn.Module,
139
+ input: Tuple[torch.Tensor],
140
+ output: torch.Tensor,
141
+ ) -> torch.Tensor:
142
+ return self._ddp._post_forward(output)
143
+
144
+
145
+ def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> NoReturn:
146
+ raise AssertionError(
147
+ "DDP does not support deepcopy. Please use state dict for serialization."
148
+ )
149
+
150
+
151
+ # Follow the same pattern as FSDP/fully_shard
152
+ class DDP:
153
+ def __new__(cls, *args, **kwargs):
154
+ """
155
+ Override ``__new__`` to remove the DDP class and directly construct
156
+ the original class for cases like indexing into a container module.
157
+ """
158
+ # Use index 2 since 0 is the dynamically constructed `DDP<...>` class
159
+ # and index 1 is the `DDP` class itself
160
+ orig_cls = cls.__mro__[2]
161
+ return orig_cls.__new__(orig_cls, *args, **kwargs)
162
+
163
+ def set_requires_gradient_sync(self, requires_gradient_sync: bool) -> None:
164
+ """
165
+ Sets if the module should sync gradients. This can be used to implement
166
+ gradient accumulation without communication.
167
+
168
+ Args:
169
+ requires_gradient_sync (bool): Whether to reduce gradients for the
170
+ module's parameters.
171
+ """
172
+ replicate.state(self)._no_sync = not requires_gradient_sync
173
+
174
+ def register_comm_hook(self, *args, **kwargs) -> None:
175
+ replicate.state(self)._comm_hook_args.append((args, kwargs))
176
+
177
+
178
+ @contract(state_cls=_ReplicateState)
179
+ def replicate(
180
+ module: nn.Module,
181
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
182
+ **kwargs,
183
+ ) -> nn.Module:
184
+ r"""Replicates a module
185
+
186
+ Args:
187
+ module (torch.nn.Module): module to replicate
188
+
189
+ Example::
190
+ >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
191
+ >>> module = nn.Linear(3, 3)
192
+ >>> replicate(module)
193
+ """
194
+ torch._C._log_api_usage_once("torch.distributed.replicate")
195
+
196
+ # TODO(fegin): using kwargs is not a good idea if we would like to make
197
+ # replicate a formal API to replace DDP.
198
+ if "device_id" in kwargs:
199
+ if not isinstance(kwargs["device_id"], (int, torch.device)):
200
+ raise RuntimeError(
201
+ "Expected device_id to be int or torch.device, "
202
+ f"but got {type(kwargs['device_id'])}"
203
+ )
204
+
205
+ if _is_fully_sharded(module):
206
+ raise RuntimeError(
207
+ "Cannot apply `replicate()` on a Module already managed by `fully_shard`"
208
+ )
209
+
210
+ if ignored_modules is None:
211
+ ignored_modules = {}
212
+ else:
213
+ ignored_modules = set(ignored_modules)
214
+
215
+ state = cast(_ReplicateState, replicate.state(module))
216
+ module.register_forward_pre_hook(state.forward_pre_hook, with_kwargs=True)
217
+ device_mesh = kwargs.get("device_mesh", None)
218
+ if device_mesh is not None:
219
+ from torch.distributed.device_mesh import _mesh_resources
220
+
221
+ root_mesh = _mesh_resources.get_root_mesh(device_mesh)
222
+ # if a root mesh is not the same as device_mesh,
223
+ # meaning the device_mesh is sliced out from the root mesh.
224
+ if root_mesh != device_mesh:
225
+ # TODO: This is a temporary work around to enable DDP + TP.
226
+ # We should do the logic in DDP so that the 2D implementation is
227
+ # sound and the state_dict works out of the box.
228
+ #
229
+ # This won't conflict with what is done in the DDP class, as the module
230
+ # that replicate is going to pass is NOT the original module.
231
+ from torch.distributed.tensor.parallel.ddp import (
232
+ _localize_dtensor,
233
+ _reconstruct_dtensor,
234
+ )
235
+
236
+ module.register_forward_pre_hook(_reconstruct_dtensor)
237
+ module.register_forward_hook(_localize_dtensor)
238
+
239
+ module.register_forward_hook(state.forward_post_hook) # type: ignore[arg-type]
240
+
241
+ state.record_init_args(module, ignored_modules, **kwargs)
242
+
243
+ # Place DDP leftmost for highest priority in the method resolution order
244
+ cls = module.__class__
245
+ dct = {"__deepcopy__": unimplemented_deepcopy}
246
+ new_cls = type(f"DDP{cls.__name__}", (DDP, cls), dct)
247
+ module.__class__ = new_cls
248
+ return module
249
+
250
+
251
+ def _is_fully_sharded(module: nn.Module) -> bool:
252
+ r"""Check if module is marked with fully_shard."""
253
+ registry = _get_registry(module)
254
+ if registry is None:
255
+ return False
256
+ return "fully_shard" in registry
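For reference, a minimal sketch of how the composable ``replicate()`` API above is typically driven (this assumes ``torch.distributed.init_process_group`` has already been called on every rank; the model and micro-batches below are placeholders):

import torch
import torch.nn as nn
from torch.distributed._composable.replicate import replicate

# DDP construction is deferred: replicate() only records its arguments and
# installs hooks; the DistributedDataParallel wrapper is built lazily on the
# first forward pass.
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 1))
replicate(model)

micro_batches = [torch.randn(4, 8) for _ in range(4)]  # placeholder data
for i, batch in enumerate(micro_batches):
    # Gradient accumulation without communication: only sync gradients on the
    # last micro-batch (this maps onto DDP's require_backward_grad_sync).
    model.set_requires_gradient_sync(i == len(micro_batches) - 1)
    model(batch).sum().backward()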
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_tools/__pycache__/fsdp2_mem_tracker.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/_tools/mem_tracker.py ADDED
@@ -0,0 +1,943 @@
1
+ import math
2
+ import os
3
+ import re
4
+ import warnings
5
+ from copy import deepcopy
6
+ from enum import auto, Enum
7
+ from functools import partial, wraps
8
+ from typing import (
9
+ Any,
10
+ Callable,
11
+ Dict,
12
+ List,
13
+ Optional,
14
+ Set,
15
+ Tuple,
16
+ Type,
17
+ TYPE_CHECKING,
18
+ Union,
19
+ )
20
+ from typing_extensions import Self
21
+
22
+ import torch
23
+ from torch import nn, optim
24
+ from torch.distributed._tools.mod_tracker import ModTracker
25
+ from torch.optim.optimizer import (
26
+ register_optimizer_step_post_hook,
27
+ register_optimizer_step_pre_hook,
28
+ )
29
+ from torch.utils._python_dispatch import (
30
+ is_traceable_wrapper_subclass,
31
+ TorchDispatchMode,
32
+ )
33
+ from torch.utils._pytree import tree_flatten, tree_map_only
34
+ from torch.utils.weak import WeakIdKeyDictionary, weakref
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from torch.utils.hooks import RemovableHandle
39
+
40
+ # This value is hard-coded here:
41
+ # https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117
42
+ _PYTORCH_MIN_ALLOCATE = (
43
+ 2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1
44
+ )
45
+ _TOTAL_KEY = "Total"
46
+
47
+ __all__ = ["MemTracker"]
48
+
49
+
50
+ class _RefType(str, Enum):
51
+ """Base Class for defining memory reference types, categorizing tensors based on their usage within a model."""
52
+
53
+
54
+ class _State(str, Enum):
55
+ """Base Class for defining module state to capture snapshots ."""
56
+
57
+
58
+ class _MemRefType(_RefType):
59
+ """
60
+ An enum to define memory reference types, categorizing tensors based on their usage within a model.
61
+
62
+ - PARAM: Tensors registered as nn.Parameter within modules.
63
+ - BUFFER: Tensors registered as nn.Buffer within modules.
64
+ - GRAD: Gradients associated with parameters.
65
+ - ACT: Tensors produced during the forward pass and recomputation in activation checkpointing.
66
+ - TEMP: Temporary memory used during the backward pass, including gradients of activations.
67
+ - OPT: Tensors holding optimizer states.
68
+ - OTH: Tensors registered via `track_external` that do not fit the above categories.
69
+ """
70
+
71
+ PARAM = "Parameter"
72
+ BUFFER = "Buffer"
73
+ GRAD = "Gradient"
74
+ ACT = "Activation"
75
+ TEMP = "Temp"
76
+ OPT = "Optstate"
77
+ OTH = "Other"
78
+
79
+
80
+ class _ModState(_State):
81
+ """
82
+ An enum to define the state of a module.
83
+
84
+ - PRE_FW: The module is about to run the forward pass.
85
+ - POST_FW: The module has finished running the forward pass.
86
+ - PEAK_FW: The module has reached the peak memory usage during the forward pass.
87
+ - PRE_BW: The module is about to run the backward pass.
88
+ - PRE_FW_AC: The module is about to run the forward pass with activation checkpointing.
89
+ - POST_FW_AC: The module has finished running the forward pass with activation checkpointing.
90
+ - POST_BW: The module has finished running the backward pass.
91
+ - PEAK_BW: The module has reached the peak memory usage during the backward pass.
92
+ """
93
+
94
+ PRE_FW = "Pre-Forward"
95
+ POST_FW = "Post-Forward"
96
+ PEAK_FW = "Peak-Forward"
97
+ PRE_BW = "Pre-Backward"
98
+ PRE_FW_AC = "Pre-Forward-AC"
99
+ POST_FW_AC = "Post-Forward-AC"
100
+ POST_BW = "Post-Backward"
101
+ PEAK_BW = "Peak-Backward"
102
+
103
+
104
+ class _ModMemStats:
105
+ """
106
+ A class to store the memory statistics of a module.
107
+
108
+ Args:
109
+ mod_fqn (str): The fully qualified name of the module.
110
+ Attributes:
111
+ mod_fqn (str): The fully qualified name of the module.
112
+ parameter_mem (int): The memory usage of the parameters of the module.
113
+ buffer_mem (int): The memory usage of the buffers of the module.
114
+ input_mem (int): The memory usage of the inputs to the module.
115
+ output_mem (int): The memory usage of the outputs from the module.
116
+ snapshots (Dict[_ModState, Dict[torch.device, Dict[str, int]]]): A dictionary of memory snapshots
117
+ of the module at different states defined by ``_ModState``.
118
+ Note:
119
+ The memory snapshot is stored as a dictionary - Dict[torch.device, Dict[str, int]], where each key is a device,
120
+ and each value is another dictionary with keys as memory reference types defined by `_MemRefType` and
121
+ values as the memory consumed in bytes.
122
+ """
123
+
124
+ def __init__(self, mod_fqn: str):
125
+ self.mod_fqn = mod_fqn
126
+ self.parameter_mem: int
127
+ self.buffer_mem: int
128
+ self.input_mem: int
129
+ self.output_mem: int
130
+ self.local_peak: Dict[torch.device, int] = {}
131
+ self.snapshots: Dict[_ModState, List[Dict[torch.device, Dict[str, int]]]] = {}
132
+
133
+
134
+ class _WeakRefInfo:
135
+ """
136
+ Manages memory statistics and device attributes for tensor storages.
137
+ """
138
+
139
+ def __init__(
140
+ self, size: int, element_size: int, device: torch.device, reftype: _RefType
141
+ ) -> None:
142
+ """
143
+ Initializes the ``_WeakRefInfo`` object with tensor storage properties.
144
+
145
+ Args:
146
+ size (int): The number of elements in the tensor storage.
147
+ element_size (int): The size of each element in the tensor storage.
148
+ device (torch.device): The device on which the tensor is allocated.
149
+ reftype (_RefType): The reference type of the tensor.
150
+ """
151
+ self.size = size
152
+ self.element_size = element_size
153
+ self.reftype = reftype
154
+ self.device = device
155
+ self.mem_consumed = self._calculate_mem_consumed()
156
+
157
+ def _calculate_mem_consumed(self) -> int:
158
+ """
159
+ Calculates the memory consumed by the tensor storage, considering device-specific allocation rules.
160
+
161
+ Returns:
162
+ int: The memory consumed in bytes.
163
+ """
164
+ mem = self.size * self.element_size
165
+ if self.device.type == "cuda":
166
+ return math.ceil((mem) / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
167
+ return mem
168
+
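A quick numeric illustration of the rounding that ``_calculate_mem_consumed`` performs above, assuming the default ``_PYTORCH_MIN_ALLOCATE`` of 2**9 = 512 bytes (i.e. CUDA memory caching is not disabled):

import math

raw_bytes = 100 * 4                         # a float32 tensor with 100 elements
rounded = math.ceil(raw_bytes / 512) * 512
# raw_bytes == 400, but for a CUDA tensor the tracker reports 512, matching the
# caching allocator's minimum block granularity; for a CPU tensor it reports 400.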
169
+ def update_mem_consumed(self, st: torch.UntypedStorage) -> int:
170
+ """
171
+ Updates and returns the memory consumed if the storage size has changed.
172
+
173
+ Args:
174
+ st (torch.UntypedStorage): The tensor storage to check for size updates.
175
+
176
+ Returns:
177
+ int: The updated memory consumed in bytes.
178
+ """
179
+ if st.size() != self.size:
180
+ self.size = st.size()
181
+ self.mem_consumed = self._calculate_mem_consumed()
182
+ return self.mem_consumed
183
+
184
+ @staticmethod
185
+ def get_untyped_storages(t: torch.Tensor) -> Set[torch.UntypedStorage]:
186
+ """
187
+ Recursively extracts untyped storages from a tensor or its subclasses.
188
+
189
+ Args:
190
+ t (torch.Tensor): The tensor to extract storages from.
191
+
192
+ Returns:
193
+ Set[torch.UntypedStorage]: A set of untyped storages.
194
+ """
195
+ unflattened_tensors = [t]
196
+ flattened_tensor_storages = set()
197
+ while len(unflattened_tensors) > 0:
198
+ obj = unflattened_tensors.pop()
199
+ if is_traceable_wrapper_subclass(obj):
200
+ attrs, _ = obj.__tensor_flatten__() # type: ignore[attr-defined]
201
+ unflattened_tensors.extend([getattr(obj, attr) for attr in attrs])
202
+ else:
203
+ if not hasattr(obj, "untyped_storage"):
204
+ warnings.warn(
205
+ f"Expected a tensor or a traceable wrapper-subclass of tensor, but got {type(obj)}",
206
+ category=UserWarning,
207
+ stacklevel=2,
208
+ )
209
+ else:
210
+ flattened_tensor_storages.add(obj.untyped_storage())
211
+ return flattened_tensor_storages
212
+
213
+ @classmethod
214
+ def create_winfo(
215
+ cls,
216
+ st: torch.UntypedStorage,
217
+ device: torch.device,
218
+ reftype: _RefType,
219
+ callback: Optional[Callable[[Self, weakref.ref], Any]] = None,
220
+ ) -> Tuple[Self, weakref.ref]:
221
+ """
222
+ Creates a new ``_WeakRefInfo`` instance and a weak reference to a ``torch.UntypedStorage`` object,
223
+ optionally attaching a callback to the weak reference.
224
+
225
+ Args:
226
+ st (torch.UntypedStorage): The storage object for which to create the weak reference info.
227
+ device (torch.device): The device associated with the storage object.
228
+ reftype (_RefType): The type of reference, used to categorize the storage.
229
+ callback (Optional[Callable[[Self, weakref.ref], Any]]): A callback function that is called when
230
+ the storage object is about to be finalized (garbage collected). The callback function
231
+ should accept two arguments: the ``_WeakRefInfo`` instance and the weak reference to the storage.
232
+ Returns:
233
+ Tuple[Self, weakref.ref]: A tuple containing the newly created ``_WeakRefInfo`` instance and the
234
+ weak reference to the storage object. The weak reference may have an attached callback if provided.
235
+ """
236
+
237
+ winfo = cls(st.size(), st.element_size(), device, reftype)
238
+ w_st = weakref.ref(st, partial(callback, winfo) if callback else None)
239
+ return winfo, w_st
240
+
241
+
242
+ def _get_mem_divisor(units: str) -> int:
243
+ unit_dict = {"B": 1, "KiB": 2**10, "MiB": 2**20, "GiB": 2**30}
244
+ if units in unit_dict:
245
+ return unit_dict[units]
246
+ else:
247
+ raise ValueError(
248
+ f"Unsupported unit: {units}. Supported units are: {', '.join(unit_dict.keys())}"
249
+ )
250
+
251
+
252
+ def _rounding_fn(value: int, divisor: int, precision: int) -> Union[float, int]:
253
+ return value if divisor == 1 else round(value / divisor, precision)
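For example, the two display helpers above combine as follows (the byte counts are made up):

from torch.distributed._tools.mem_tracker import _get_mem_divisor, _rounding_fn

divisor = _get_mem_divisor("MiB")             # 2**20
_rounding_fn(3 * 2**20 + 1024, divisor, 2)    # -> 3.0, printed as "3.0 MiB"
_rounding_fn(512, _get_mem_divisor("B"), 2)   # -> 512, byte values are never rounded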
254
+
255
+
256
+ def _print_snapshot(snapshot: Dict[torch.device, Dict[str, int]], units: str) -> None:
257
+ if len(snapshot) == 0:
258
+ print("No memory tracked.")
259
+ return
260
+ divisor = _get_mem_divisor(units)
261
+ for dev, dev_snap in snapshot.items():
262
+ if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
263
+ continue
264
+ print(
265
+ f"Device: {dev}",
266
+ *(
267
+ f"\t{k}: {_rounding_fn(v, divisor, 2)} {units}"
268
+ for k, v in dev_snap.items()
269
+ ),
270
+ sep="\n",
271
+ )
272
+
273
+
274
+ def _print_snapshot_tabular(
275
+ snapshot: Dict[torch.device, Dict[str, int]], units: str
276
+ ) -> None:
277
+ if len(snapshot) == 0:
278
+ print("No memory tracked.")
279
+ return
280
+ try:
281
+ from tabulate import tabulate
282
+ except ImportError as err:
283
+ raise ImportError(
284
+ "Please install tabulate to use the tabulate option."
285
+ ) from err
286
+ divisor = _get_mem_divisor(units)
287
+ table_data = []
288
+ key_list = list(next(iter(snapshot.values())).keys())
289
+ headers = ["Device"] + [f"{key}" for key in key_list]
290
+
291
+ for dev, dev_snap in snapshot.items():
292
+ if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
293
+ continue
294
+ row = [str(dev)]
295
+ row.extend(f"{_rounding_fn(v, divisor, 2)} {units}" for v in dev_snap.values())
296
+ table_data.append(row)
297
+ print(tabulate(table_data, headers=headers, tablefmt="rst"))
298
+
299
+
300
+ def _print_state_snapshots(
301
+ snapshots: Dict[_State, List[Dict[torch.device, Dict[str, int]]]], units: str
302
+ ) -> None:
303
+ for state, snapshot_list in snapshots.items():
304
+ print(f"{state}")
305
+ for i, snapshot in enumerate(snapshot_list):
306
+ print(f"# {i + 1}:")
307
+ _print_snapshot(snapshot, units)
308
+ print()
309
+
310
+
311
+ def _print_state_snapshots_tabular(
312
+ snapshots: Dict[_State, List[Dict[torch.device, Dict[str, int]]]], units: str
313
+ ) -> None:
314
+ try:
315
+ from tabulate import tabulate
316
+ except ImportError as err:
317
+ raise ImportError(
318
+ "Please install tabulate to use the tabulate option."
319
+ ) from err
320
+
321
+ table_data = []
322
+ last_state_call = None
323
+ divisor = _get_mem_divisor(units)
324
+ for state, snapshot_list in snapshots.items():
325
+ for i, snapshot in enumerate(snapshot_list):
326
+ state_call = f"{state} # {i + 1}"
327
+ for dev, dev_snap in snapshot.items():
328
+ if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
329
+ continue
330
+ row = {
331
+ "State & Call": (
332
+ state_call if state_call != last_state_call else ""
333
+ ),
334
+ "Device": str(dev),
335
+ }
336
+ last_state_call = state_call
337
+ for k, v in dev_snap.items():
338
+ row[f"{k}"] = f"{_rounding_fn(v, divisor, 2)} {units}"
339
+ table_data.append(row)
340
+ print(tabulate(table_data, headers="keys", tablefmt="rst"))
341
+
342
+
343
+ class _UpdateType(Enum):
344
+ # These are used for tracking updates to the continuously maintained memory snapshot.
345
+ # ADD - When a new tensor storage is tracked
346
+ # DEL - When a tensor storage is about to be finalized (garbage collected).
347
+ # REF - When a tensor reference is updated, for instance, the gradients are marked as
348
+ # generic backward reference types until the grad_hook categorizes them as gradients.
349
+ # SIZE - When a tensor's storage is resized.
350
+ ADD = auto()
351
+ DEL = auto()
352
+ REF = auto()
353
+ SIZE = auto()
354
+
355
+
356
+ class MemTracker(TorchDispatchMode):
357
+ """
358
+ A TorchDispatchMode to track, categorize and attribute the tensor memory created or accessed within its context.
359
+
360
+ It categorizes the tracked tensors as parameters, buffers, activations, gradients, temporary memory and optimizer states
361
+ as defined by ``_MemRefType`` within its context. It captures memory `snapshots` for the modules, called within its context,
362
+ at various states defined by ``_ModState``.
363
+
364
+ Attributes:
365
+ memory_tracking: A weakref key dictionary to store the memory statistics of each module. Each key
366
+ is a reference to a module, and each value is a ``_ModMemStats`` object that stores the memory
367
+ statistics of the module.
368
+
369
+ Note:
370
+ The MemTracker should be used as a context manager. The modules, optimizers, and any other tensors created within
371
+ the context of MemTracker will be tracked by default. Any tensors or stateful objects such as modules, optimizers etc.
372
+ that need to be tracked but are created outside the MemTracker should be registered using the `track_external` method.
373
+ The `track_external` method should be called before the MemTracker is used. Any tensors created outside the ``MemTracker``
374
+ and not supplied to the `track_external` method will not be tracked by the ``MemTracker``.
375
+
376
+ Example usage:
377
+
378
+ .. code-block:: python
379
+
380
+ module = ...
381
+ optimizer = ...
382
+ inp = ...
383
+ mem_tracker = MemTracker()
384
+ mem_tracker.track_external(module, optimizer, inp)
385
+ with mem_tracker as mt:
386
+ loss = module(inp)
387
+ print("After Forward:")
388
+ mt.display_snapshot("current")
389
+ loss.backward()
390
+ optimizer.step()
391
+ optimizer.zero_grad()
392
+ mt.display_snapshot("peak")
393
+ mt.display_modulewise_snapshots(depth = 3, units = "MiB")
394
+
395
+ Known Limitations:
396
+ - The ``MemTracker`` does not track memory for tensors that bypass the ``TorchDispatchMode``, e.g. under ``no_dispatch``.
397
+ - Resizing tensor storages directly through non-Tensor methods other than ``torch.UntypedStorage.resize_``
398
+ is not tracked. File a Github issue if you have use-cases for this.
399
+ - If the tensors are not plain tensors or traceable wrapper subclasses of ``torch.Tensor``, then the tracker does not know how to
400
+ track their storages. File a Github issue if you have use-cases for this.
401
+ - During AC in the backward pass there might be misattribution between activation and temp memory, but the peak memory
402
+ will be tracked accurately. This will be fixed in the next update by hooking into ``torch.utils.checkpoint``.
403
+ """
404
+
405
+ def __init__(self) -> None:
406
+ self.memory_tracking = WeakIdKeyDictionary()
407
+ self._curr_mem_snap: Dict[torch.device, Dict[str, int]] = {}
408
+ self._peak_mem: Dict[torch.device, int] = {}
409
+ self._peak_mem_snap: Dict[torch.device, Dict[str, int]] = {}
410
+ self._param_to_grad_hook_handles = WeakIdKeyDictionary()
411
+ self._optimizer_hook_handles: Optional[
412
+ Tuple[RemovableHandle, RemovableHandle]
413
+ ] = None
414
+ # Dictionary to store the ``_WeakRefInfo`` instances corresponding to each tensor's storage.
415
+ self._WINFO = WeakIdKeyDictionary()
416
+ self._mod_tracker = ModTracker()
417
+ # This is a general memory tracker which can be used with any ``_RefType`` subclass
418
+ self._ref_class: Type[_RefType] = _MemRefType
419
+ # Flags to track if we are in the AC region or optimizer step region
420
+ self._in_opt: bool = False
421
+ self._in_ac: bool = False
422
+ # Weak references to the topmost AC module currently active
423
+ self._ac_mod: Optional[weakref.ref] = None
424
+ self._orig_resize = torch.UntypedStorage.resize_
425
+
426
+ def _update_snap(
427
+ self,
428
+ u_type: _UpdateType,
429
+ winfo: _WeakRefInfo,
430
+ old_mem_consumed: Optional[int] = None,
431
+ old_reftype: Optional[_RefType] = None,
432
+ ) -> None:
433
+ # Initialize a flag to track if the total memory might drop to zero after updates.
434
+ maybe_zero = False
435
+ # Ensure the device entry exists in the current memory snapshot, initializing if necessary.
436
+ dev_snap = self._curr_mem_snap.setdefault(
437
+ winfo.device, dict.fromkeys(self._ref_class, 0)
438
+ )
439
+ dev_snap.setdefault(_TOTAL_KEY, 0)
440
+ # Handle different types of updates based on the update type (`u_type`).
441
+ if u_type == _UpdateType.ADD:
442
+ # Increase the memory consumed for the specific reference type and update the total.
443
+ dev_snap[winfo.reftype] += winfo.mem_consumed
444
+ dev_snap[_TOTAL_KEY] += winfo.mem_consumed
445
+ elif u_type == _UpdateType.DEL:
446
+ # Decrease the memory consumed for the specific reference type and reduce the total.
447
+ dev_snap[winfo.reftype] -= winfo.mem_consumed
448
+ dev_snap[_TOTAL_KEY] -= winfo.mem_consumed
449
+ maybe_zero = True
450
+ elif u_type == _UpdateType.REF:
451
+ assert old_reftype is not None
452
+ # Adjust memory consumption between two reference types within the same device.
453
+ dev_snap[old_reftype] -= winfo.mem_consumed
454
+ dev_snap[winfo.reftype] += winfo.mem_consumed
455
+ elif u_type == _UpdateType.SIZE:
456
+ assert old_mem_consumed is not None
457
+ # Adjust the memory consumed for a reference type due to a change in size.
458
+ change = winfo.mem_consumed - old_mem_consumed
459
+ dev_snap[winfo.reftype] += change
460
+ dev_snap[_TOTAL_KEY] += change
461
+ maybe_zero = True
462
+ else:
463
+ raise ValueError(f"Invalid update type: {u_type}")
464
+ # Check if the total memory for the device has dropped to zero.
465
+ if maybe_zero:
466
+ if self._curr_mem_snap[winfo.device][_TOTAL_KEY] == 0:
467
+ # Remove the device entry from the memory snapshot if the total memory is zero.
468
+ del self._curr_mem_snap[winfo.device]
469
+
470
+ def _update_and_maybe_create_winfos(
471
+ self,
472
+ t: torch.Tensor,
473
+ reftype: _RefType,
474
+ update_existing: bool = False,
475
+ ) -> Set[_WeakRefInfo]:
476
+ sts = _WeakRefInfo.get_untyped_storages(t)
477
+ winfos = set()
478
+ for st in sts:
479
+ # Attempt to retrieve existing ``_WeakRefInfo`` and its weak reference from the tracking dictionary.
480
+ winfo, _ = self._WINFO.get(st, (None, None))
481
+ if winfo is not None:
482
+ # If ``_WeakRefInfo`` exists, check if the reference type needs to be updated.
483
+ old_reftype = winfo.reftype
484
+ if old_reftype != reftype:
485
+ # Update the reference type and apply changes via ``_update_snap``.
486
+ winfo.reftype = reftype
487
+ self._update_snap(_UpdateType.REF, winfo, old_reftype=old_reftype)
488
+ winfos.add(winfo)
489
+ elif update_existing:
490
+ # If no existing ``_WeakRefInfo`` is found and update_existing is True, raise an error.
491
+ raise KeyError("No existing winfo found")
492
+ else:
493
+ # If no existing _WeakRefInfo is found and update_existing is False, create a new ``_WeakRefInfo``.
494
+ winfo, w_st = _WeakRefInfo.create_winfo(
495
+ st, t.device, reftype, self._delete_callback
496
+ )
497
+ # Store the new ``_WeakRefInfo`` and its weak reference in the tracking dictionary.
498
+ self._WINFO[st] = (winfo, w_st)
499
+ # Update the snapshot for the newly added ``_WeakRefInfo``.
500
+ if winfo.mem_consumed > 0:
501
+ self._update_snap(_UpdateType.ADD, winfo)
502
+ winfos.add(winfo)
503
+ return winfos
504
+
505
+ def _delete_callback(self, winfo: _WeakRefInfo, w_st: weakref.ref) -> None:
506
+ # Callback to be called when the storage object corresponding to the ``_WeakRefInfo``
507
+ # instance is about to be finalized.
508
+ if winfo.mem_consumed > 0:
509
+ self._update_snap(_UpdateType.DEL, winfo)
510
+
511
+ def _track_resize(self) -> None:
512
+ # Need to monkey-patch this because ``torch.UntypedStorage.resize_`` is not captured
513
+ # by ``TorchDispatchMode``.
514
+ @wraps(self._orig_resize)
515
+ def resize_(st: torch.UntypedStorage, size: int) -> None:
516
+ self._orig_resize(st, size)
517
+ winfo, _ = self._WINFO.get(st, (None, None))
518
+ if winfo is not None and winfo.size != st.size():
519
+ old_mem_consumed = winfo.mem_consumed
520
+ winfo.update_mem_consumed(st)
521
+ self._update_snap(
522
+ _UpdateType.SIZE, winfo, old_mem_consumed=old_mem_consumed
523
+ )
524
+
525
+ torch.UntypedStorage.resize_ = resize_ # type: ignore[method-assign, assignment]
526
+
527
+ def _restore_resize(self) -> None:
528
+ torch.UntypedStorage.resize_ = self._orig_resize # type: ignore[method-assign]
529
+
530
+ def _update_peak_stats(self, peak_state: _State) -> None:
531
+ # We first capture the current memory snapshot of the current tracker state. Then
532
+ # we step through each of the modules we have tracked so far in ``memory_tracking``
533
+ # and check if it is currently active by querying ``_mod_tracker.parents``
534
+ # If it is active, we update the per device peak memory usage for the module
535
+ # corresponding to the ``_State`` which can be ``PEAK_FW`` or ``PEAK_BW``.
536
+ curr_snap = self._curr_mem_snap
537
+
538
+ for mod_stats in self.memory_tracking.values():
539
+ if mod_stats.mod_fqn in self._mod_tracker.parents:
540
+ if peak_state in mod_stats.snapshots:
541
+ for dev, dev_snap in curr_snap.items():
542
+ if mod_stats.local_peak.get(dev, 0) < dev_snap[_TOTAL_KEY]:
543
+ mod_stats.local_peak[dev] = dev_snap[_TOTAL_KEY]
544
+ mod_stats.snapshots[peak_state][-1][dev] = deepcopy(
545
+ dev_snap
546
+ )
547
+
548
+ for dev, dev_snap in curr_snap.items():
549
+ if self._peak_mem.get(dev, 0) < dev_snap[_TOTAL_KEY]:
550
+ self._peak_mem[dev] = dev_snap[_TOTAL_KEY]
551
+ self._peak_mem_snap[dev] = deepcopy(dev_snap)
552
+
553
+ def _track(self, reftype: _RefType, t: torch.Tensor) -> None:
554
+ # Get the storages of the tensor and check if we have already tracked them.
555
+ # If yes, then check if the storage size has changed and update the current snapshot.
556
+ # Else create a new ``_WeakRefInfo`` instance and add it to the dictionary.
557
+ sts = _WeakRefInfo.get_untyped_storages(t)
558
+ for st in sts:
559
+ winfo, _ = self._WINFO.get(st, (None, None))
560
+ if winfo is not None:
561
+ if winfo.size != st.size():
562
+ old_mem_consumed = winfo.mem_consumed
563
+ winfo.update_mem_consumed(st)
564
+ self._update_snap(
565
+ _UpdateType.SIZE, winfo, old_mem_consumed=old_mem_consumed
566
+ )
567
+ return
568
+ else:
569
+ winfo, w_st = _WeakRefInfo.create_winfo(
570
+ st, t.device, reftype, self._delete_callback
571
+ )
572
+ self._WINFO[st] = (winfo, w_st)
573
+ # Update the current snapshot for the newly added ``_WeakRefInfo``.
574
+ if winfo.mem_consumed > 0:
575
+ self._update_snap(_UpdateType.ADD, winfo)
576
+
577
+ def get_tracker_snapshot(
578
+ self, type: str = "current"
579
+ ) -> Dict[torch.device, Dict[str, int]]:
580
+ """
581
+ Capture a snapshot of the memory usage breakdown per device, based on the specified type.
582
+
583
+ Args:
584
+ type (str): The type of snapshot to capture. Can be "current" for the current memory usage or "peak" for the
585
+ peak memory usage. Defaults to "current".
586
+ Returns:
587
+ Dict[torch.device, Dict[str, int]]: A dictionary where each key is a torch.device, and each value is another
588
+ dictionary. This inner dictionary has keys representing memory reference
589
+ types as defined in ``_MemRefType`` and values representing the amount of
590
+ memory consumed in bytes.
591
+ Raises:
592
+ ValueError: If an invalid type is specified.
593
+ """
594
+ if type == "current":
595
+ return deepcopy(self._curr_mem_snap)
596
+ elif type == "peak":
597
+ return deepcopy(self._peak_mem_snap)
598
+ else:
599
+ raise ValueError(f"Invalid type {type}")
600
+
601
+ def _track_module_params_and_buffers(
602
+ self, module: nn.Module, install_grad_hooks: bool = True
603
+ ) -> Tuple[int, int]:
604
+ # Track the parameters and buffers of the module if not already tracked.
605
+ # If the parameters have gradients, track the gradients as well.
606
+ # If install_grad_hooks is True, install a gradient hook on the parameters
607
+ # to track the gradients, if it has not already been installed.
608
+ # Return the total memory consumed by the parameters and buffers.
609
+ def _grad_hook(grad: torch.Tensor) -> None:
610
+ self._update_and_maybe_create_winfos(
611
+ grad,
612
+ _MemRefType.GRAD,
613
+ )
614
+
615
+ param_memory = 0
616
+ for param in module.parameters():
617
+ winfos = self._update_and_maybe_create_winfos(
618
+ param,
619
+ _MemRefType.PARAM,
620
+ )
621
+ param_memory += sum(winfo.mem_consumed for winfo in winfos)
622
+ if param.grad is not None:
623
+ self._update_and_maybe_create_winfos(
624
+ param.grad,
625
+ _MemRefType.GRAD,
626
+ )
627
+ if (
628
+ self._param_to_grad_hook_handles.get(param, None) is None
629
+ and install_grad_hooks
630
+ ):
631
+ grad_hook_handle = param.register_hook(_grad_hook)
632
+ post_acc_grad_hook_handle = param.register_post_accumulate_grad_hook(
633
+ lambda p: (_grad_hook(p.grad))
634
+ )
635
+ self._param_to_grad_hook_handles[param] = (
636
+ grad_hook_handle,
637
+ post_acc_grad_hook_handle,
638
+ )
639
+ buffer_memory = 0
640
+ for buffer in module.buffers():
641
+ winfos = self._update_and_maybe_create_winfos(
642
+ buffer,
643
+ _MemRefType.BUFFER,
644
+ )
645
+ buffer_memory += sum(winfo.mem_consumed for winfo in winfos)
646
+ return (param_memory, buffer_memory)
647
+
648
+ def _track_inputs_or_outputs(self, args: Any) -> int:
649
+ # Calculate the memory consumed by the inputs or outputs of the module.
650
+ input_or_output_memory = 0
651
+
652
+ def add_inps_or_outs(t: torch.Tensor) -> None:
653
+ nonlocal input_or_output_memory
654
+ sts = _WeakRefInfo.get_untyped_storages(t)
655
+ for st in sts:
656
+ winfo, _ = self._WINFO.get(st, (None, None))
657
+ if winfo is not None:
658
+ input_or_output_memory += winfo.mem_consumed
659
+
660
+ tree_map_only(torch.Tensor, add_inps_or_outs, args)
661
+ return input_or_output_memory
662
+
663
+ def _pre_fw_hook(self, module: nn.Module, inputs: Any) -> None:
664
+ # This is installed as a pre-fwd user hook with ``ModTracker``. Based on the following cases we
665
+ # set the state and capture the memory snapshot for the module.
666
+ # Case 1: If the module is not in the ``memory_tracking`` dictionary, we track the parameters, buffers,
667
+ # input and output memory of the module. Create a new ``_ModMemStats`` instance for the module
668
+ # and add it to the ``memory_tracking`` dictionary.
669
+ # Case 2: If the module is already in the ``memory_tracking`` dictionary and we are in backward, this means
670
+ # we are in the AC region. We check if this is the top most module in the AC region. If it is,
671
+ # we store a weak reference and set the flag ``_in_ac`` to True.
672
+ # Case 3: If the module is already in the ``memory_tracking`` dictionary and we are in forward, this means
673
+ # this module is called for the second time. If it is a root module, that means we are in the next
674
+ # iteration and we error out. If it is not a root module, that means it's a submodule that is being
675
+ # used multiple times in the same iteration, which we allow and track.
676
+ # For Cases 1 and 3, we also initialize the ``local_peak`` and ``PEAK_FW`` snapshots for the module.
677
+ mod_name = self._mod_tracker.get_known_fqn(module)
678
+ assert mod_name is not None
679
+ if module not in self.memory_tracking:
680
+ mod_stats = _ModMemStats(mod_name)
681
+ param_mem, buffer_mem = self._track_module_params_and_buffers(
682
+ module, install_grad_hooks=True
683
+ )
684
+ input_mem = self._track_inputs_or_outputs(inputs)
685
+ mod_stats.parameter_mem = param_mem
686
+ mod_stats.buffer_mem = buffer_mem
687
+ mod_stats.input_mem = input_mem
688
+ self.memory_tracking[module] = mod_stats
689
+ state = _ModState.PRE_FW
690
+
691
+ elif self._mod_tracker.is_bw:
692
+ mod_stats = self.memory_tracking[module]
693
+ state = _ModState.PRE_FW_AC
694
+ if self._ac_mod is None:
695
+ self._ac_mod = weakref.ref(module)
696
+ self._in_ac = True
697
+ else:
698
+ parents = set(self._mod_tracker.parents) - {mod_name}
699
+ if len(parents) == 1 and "Global" in parents:
700
+ raise NotImplementedError(
701
+ "MemTracker does not support memory tracking for multiple iterative calls."
702
+ " Either use ``reset_mod_stats`` to clear module memory stats for the previous iteration"
703
+ " or file a github issue if you need this feature."
704
+ )
705
+ mod_stats = self.memory_tracking[module]
706
+ state = _ModState.PRE_FW
707
+ input_mem = self._track_inputs_or_outputs(inputs)
708
+ mod_stats.input_mem = input_mem
709
+
710
+ mem_snapshot = self.get_tracker_snapshot()
711
+ if state == _ModState.PRE_FW:
712
+ mod_stats.local_peak = {
713
+ dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in mem_snapshot.items()
714
+ }
715
+ mod_stats.snapshots.setdefault(_ModState.PEAK_FW, []).append(mem_snapshot)
716
+ mod_stats.snapshots.setdefault(state, []).append(deepcopy(mem_snapshot))
717
+
718
+ def _post_fw_hook(self, module: nn.Module, inputs: Any, outputs: Any) -> None:
719
+ # This is installed as a post-fwd user hook with ``ModTracker``. Based on the following cases we
720
+ # set the state and capture the memory snapshot for the module.
721
+ # Case 1: This is called in backward, which means we are in the AC region. If this is the top most module
722
+ # in the AC region, we set the flag ``_in_ac`` to False.
723
+ # Case 2: This is called in forward so we calculate the output memory
724
+ # of the module and update its mod_stats.
725
+ mod_stats = self.memory_tracking[module]
726
+ if self._mod_tracker.is_bw:
727
+ state = _ModState.POST_FW_AC
728
+ if self._ac_mod is not None and self._ac_mod() is module:
729
+ self._ac_mod = None
730
+ self._in_ac = False
731
+ else:
732
+ state = _ModState.POST_FW
733
+ output_mem = self._track_inputs_or_outputs(outputs)
734
+ mod_stats.output_mem = output_mem
735
+ mod_stats.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
736
+
737
+ def _pre_bw_hook(self, module: nn.Module, args: Any) -> None:
738
+ # This is installed as a pre-bwd user hook with ``ModTracker``. We set the state and capture the
739
+ # snapshot for the module. We also initialize the ``local_peak`` and ``PEAK_BW`` snapshot for it.
740
+ # If the module is None, we skip the hook.
741
+ # This can happen since this hook is installed inside a multi-grad hook on the module's output tensors
742
+ # and the module itself may not be alive during backward.
743
+ if module is None:
744
+ warnings.warn("Module is None. Skipping PRE_BW hook.", stacklevel=2)
745
+ return
746
+ mod_stats = self.memory_tracking[module]
747
+ mem_snapshot = self.get_tracker_snapshot()
748
+ mod_stats.local_peak = {
749
+ dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in mem_snapshot.items()
750
+ }
751
+ mod_stats.snapshots.setdefault(_ModState.PEAK_BW, []).append(mem_snapshot)
752
+ mod_stats.snapshots.setdefault(_ModState.PRE_BW, []).append(
753
+ deepcopy(mem_snapshot)
754
+ )
755
+
756
+ def _post_bw_hook(self, module: nn.Module, args: Any) -> None:
757
+ # This is installed as a post-bwd user hook with ``ModTracker``. We set the state and capture the
758
+ # snapshot for the module if it is not None.
759
+ # This can happen since this hook is installed inside a multi-grad hook on the module's input tensors
760
+ # and the module itself may not be alive during backward.
761
+ if module is None:
762
+ warnings.warn("Module is None. Skipping POST_BW hook.", stacklevel=2)
763
+ return
764
+ mod_stats = self.memory_tracking[module]
765
+ mod_stats.snapshots.setdefault(_ModState.POST_BW, []).append(
766
+ self.get_tracker_snapshot()
767
+ )
768
+
769
+ def _track_optimizer_states(
770
+ self, reftype: _RefType, optimizer: optim.Optimizer
771
+ ) -> None:
772
+ for states in optimizer.state.values():
773
+ for val in states.values():
774
+ if isinstance(val, torch.Tensor):
775
+ self._update_and_maybe_create_winfos(
776
+ val,
777
+ reftype,
778
+ )
779
+
780
+ def _register_global_optimizer_hook(self) -> None:
781
+ # Register a hook on the optimizer step to track the optimizer states.
782
+ # The pre-hook is to set the flag ``_in_opt`` to True. The post-hook unsets the flag,
783
+ # and also tracks any optimizer states that are created during the optimizer step.
784
+ def _opt_step_pre_hook(
785
+ optimizer: optim.Optimizer, args: Any, kwargs: Any
786
+ ) -> None:
787
+ self._in_opt = True
788
+
789
+ def _opt_step_post_hook(
790
+ optimizer: optim.Optimizer, args: Any, kwargs: Any
791
+ ) -> None:
792
+ self._track_optimizer_states(_MemRefType.OPT, optimizer)
793
+ self._in_opt = False
794
+
795
+ self._optimizer_hook_handles = (
796
+ register_optimizer_step_pre_hook(_opt_step_pre_hook),
797
+ register_optimizer_step_post_hook(_opt_step_post_hook),
798
+ )
799
+
800
+ def _deregister_param_and_optimizer_hooks(self) -> None:
801
+ for (
802
+ grad_hook_handle,
803
+ post_acc_grad_hook_handle,
804
+ ) in self._param_to_grad_hook_handles.values():
805
+ grad_hook_handle.remove()
806
+ post_acc_grad_hook_handle.remove()
807
+ self._param_to_grad_hook_handles.clear()
808
+
809
+ if self._optimizer_hook_handles is not None:
810
+ for handle in self._optimizer_hook_handles:
811
+ handle.remove()
812
+ self._optimizer_hook_handles = None
813
+
814
+ def track_external(
815
+ self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]
816
+ ) -> None:
817
+ """
818
+ Track tensors and stateful objects like modules, optimizers etc. that are created outside the MemTracker.
819
+
820
+ This method should be called before the ``MemTracker`` is used. Any tensors that are not module parameters, buffers,
821
+ gradients, activations, or optimizer states will be categorized as ``Other``. If you want them categorized with a
822
+ custom name, please file a GitHub issue. Any tensors created outside the MemTracker and not supplied to this
823
+ method will not be tracked by ``MemTracker``.
824
+
825
+ Args:
826
+ *external (Union[nn.Module, optim.Optimizer, torch.Tensor]): The external modules, optimizers, and
827
+ tensors to be tracked.
828
+ """
829
+ flat_external, _ = tree_flatten(external)
830
+ for obj in flat_external:
831
+ if isinstance(obj, torch.Tensor):
832
+ self._update_and_maybe_create_winfos(
833
+ obj,
834
+ _MemRefType.OTH,
835
+ )
836
+ elif isinstance(obj, torch.nn.Module):
837
+ self._track_module_params_and_buffers(obj, install_grad_hooks=False)
838
+ elif isinstance(obj, optim.Optimizer):
839
+ self._track_optimizer_states(_MemRefType.OPT, obj)
840
+ else:
841
+ raise TypeError(
842
+ f"Object of type {type(obj)} is not supported for tracking. "
843
+ f"Only stateful objects like modules, optimizers, and tensors are supported."
844
+ )
845
+
846
+ def display_snapshot(
847
+ self, type: str = "current", units: str = "B", tabulate: bool = False
848
+ ) -> None:
849
+ """
850
+ Display the memory usage breakdown snapshot of the tracker based on the specified type and units.
851
+
852
+ Keyword args:
853
+ type (str): The type of snapshot to display. Can be "current" for the current memory usage or "peak" for the
854
+ peak memory usage. Defaults to "current".
855
+ units (str): The units to use for displaying memory usage. Defaults to "B". Supports ["B", "KiB", "MiB", "GiB"].
856
+ tabulate (bool): Whether to display the snapshot in a tabular format. Defaults to False.
857
+ """
858
+ snapshot = self.get_tracker_snapshot(type)
859
+ if tabulate:
860
+ _print_snapshot_tabular(snapshot, units)
861
+ else:
862
+ _print_snapshot(snapshot, units)
863
+
864
+ def display_modulewise_snapshots(
865
+ self, depth: int = 2, units: str = "B", tabulate: bool = False
866
+ ) -> None:
867
+ """
868
+ Print per device memory breakdown snapshot for each module called within MemTracker.
869
+
870
+ Snapshots are displayed for the states defined by ``_ModState``.
871
+ The module hierarchy is displayed up to the specified depth.
872
+
873
+ Keyword Args:
874
+ depth (int, optional): The depth of the module hierarchy to display. Defaults to 2.
875
+ units (str, optional): The units to use for memory tracking. Defaults to "B". Supports ["B", "KiB", "MiB", "GiB"].
876
+ tabulate (bool, optional): Whether to display the snapshot in a tabular format. Defaults to False.
877
+ """
878
+
879
+ def natural_sort_key(s: str) -> List[Union[int, str]]:
880
+ return [
881
+ int(text) if text.isdigit() else text.lower()
882
+ for text in re.split("([0-9]+)", s)
883
+ ]
884
+
885
+ for mod_stats in sorted(
886
+ self.memory_tracking.values(),
887
+ key=lambda m_stats: natural_sort_key(m_stats.mod_fqn),
888
+ ):
889
+ mod_fqn = mod_stats.mod_fqn
890
+ mod_depth = mod_fqn.count(".") + 1
891
+ if mod_depth > depth:
892
+ continue
893
+ print(f"Module: {mod_fqn}")
894
+ if tabulate:
895
+ _print_state_snapshots_tabular(mod_stats.snapshots, units)
896
+ else:
897
+ _print_state_snapshots(mod_stats.snapshots, units)
898
+
899
+ def reset_mod_stats(self) -> None:
900
+ """
901
+ Reset all the module memory stats. Clears ``memory_tracking`` dictionary.
902
+ """
903
+ self.memory_tracking.clear()
904
+
905
+ def __enter__(self) -> "MemTracker":
906
+ self._register_global_optimizer_hook()
907
+ self._mod_tracker.register_user_hooks(
908
+ self._pre_fw_hook,
909
+ self._post_fw_hook,
910
+ self._pre_bw_hook,
911
+ self._post_bw_hook,
912
+ )
913
+ self._track_resize()
914
+ self._peak_mem_snap = self.get_tracker_snapshot()
915
+ self._peak_mem = {
916
+ dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in self._peak_mem_snap.items()
917
+ }
918
+ self._mod_tracker.__enter__()
919
+ super().__enter__()
920
+ return self
921
+
922
+ def __exit__(self, *args: Any) -> None:
923
+ self._deregister_param_and_optimizer_hooks()
924
+ self._mod_tracker.clear_user_hooks()
925
+ self._restore_resize()
926
+ super().__exit__(*args)
927
+ self._mod_tracker.__exit__(*args)
928
+
929
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None): # type: ignore[no-untyped-def]
930
+ res = func(*args, **kwargs or {})
931
+ # If we are tracking an optimizer state, we use the optimizer reference type.
932
+ # If we are in backward region and not in AC region, we use the backward reference type.
933
+ # Else we use the forward reference type.
934
+ if self._in_opt:
935
+ reftype = _MemRefType.OPT
936
+ elif self._mod_tracker.is_bw and not self._in_ac:
937
+ reftype = _MemRefType.TEMP
938
+ else:
939
+ reftype = _MemRefType.ACT
940
+ tree_map_only(torch.Tensor, partial(self._track, reftype), res)
941
+ peak_state = _ModState.PEAK_BW if self._mod_tracker.is_bw else _ModState.PEAK_FW
942
+ self._update_peak_stats(peak_state)
943
+ return res
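Putting the tracker above together, a minimal single-process sketch (the model, optimizer, and input are placeholders; it mirrors the example in the ``MemTracker`` docstring):

import torch
import torch.nn as nn
from torch.distributed._tools.mem_tracker import MemTracker

model = nn.Sequential(nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = torch.optim.Adam(model.parameters())
inp = torch.randn(16, 32)

mem_tracker = MemTracker()
# Objects created outside the context manager must be registered explicitly.
mem_tracker.track_external(model, optimizer, inp)
with mem_tracker as mt:
    loss = model(inp).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

mt.display_snapshot("peak", units="KiB")
mt.display_modulewise_snapshots(depth=2, units="KiB")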
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/__pycache__/_unshard_param_utils.cpython-310.pyc ADDED
Binary file (8.11 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_flat_param.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_optim_utils.py ADDED
@@ -0,0 +1,2091 @@
1
+ # mypy: allow-untyped-defs
2
+ import copy
3
+ import functools
4
+ import logging
5
+ import warnings
6
+ from contextlib import ExitStack
7
+ from dataclasses import dataclass, field
8
+ from typing import (
9
+ Any,
10
+ cast,
11
+ Dict,
12
+ Iterable,
13
+ Iterator,
14
+ List,
15
+ NamedTuple,
16
+ no_type_check,
17
+ Optional,
18
+ Sequence,
19
+ Set,
20
+ Tuple,
21
+ TYPE_CHECKING,
22
+ Union,
23
+ )
24
+
25
+ import torch
26
+ import torch.distributed as dist
27
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
28
+ import torch.nn as nn
29
+ from torch.distributed._state_dict_utils import _gather_state_dict
30
+ from torch.distributed.distributed_c10d import _get_pg_default_device
31
+ from torch.distributed.fsdp._common_utils import (
32
+ _apply_to_modules,
33
+ _FSDPState,
34
+ _get_module_fsdp_state_if_fully_sharded_module,
35
+ _get_param_to_fqns,
36
+ _module_handle,
37
+ _named_parameters_with_duplicates,
38
+ clean_tensor_name,
39
+ )
40
+ from torch.distributed.fsdp._debug_utils import SimpleProfiler
41
+ from torch.distributed.fsdp._flat_param import FlatParameter, FlatParamHandle
42
+ from torch.distributed.fsdp._fsdp_extensions import (
43
+ _ext_chunk_dtensor,
44
+ _ext_chunk_tensor,
45
+ )
46
+ from torch.distributed.fsdp._runtime_utils import (
47
+ _lazy_init,
48
+ _reset_flat_param_grad_info_if_needed,
49
+ )
50
+ from torch.distributed.fsdp.api import (
51
+ ShardingStrategy,
52
+ StateDictSettings,
53
+ StateDictType,
54
+ )
55
+ from torch.distributed.tensor import DTensor, Replicate
56
+ from torch.utils._pytree import tree_map_only
57
+
58
+
59
+ if TYPE_CHECKING:
60
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
61
+
62
+
63
+ logger = logging.getLogger(__name__)
64
+
65
+
66
+ @dataclass
67
+ class FSDPParamInfo:
68
+ state: _FSDPState
69
+ handle: FlatParamHandle
70
+ param_indices: Dict[str, int]
71
+ param_requires_grad: List[bool]
72
+
73
+
74
+ def sorted_items(dictionary: Dict[str, Any]) -> Iterator[Tuple[str, Any]]:
75
+ keys = sorted(dictionary.keys())
76
+ for k in keys:
77
+ yield k, dictionary[k]
78
+
79
+
80
+ @dataclass
81
+ class _ConsolidatedOptimState:
82
+ """
83
+ This holds the consolidated optimizer state on the target rank. Positive-
84
+ dimension tensor state is communicated across ranks, while zero-dimension
85
+ tensor state and non-tensor state is taken directly from the target rank.
86
+
87
+ PyTorch version 1.12 moved to using zero-dimension tensors for scalar
88
+ values, but user-implemented optimizers may still use float (i.e. a
89
+ non-tensor). Thus, we support both and handle them identically.
90
+
91
+ Attributes:
92
+ tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
93
+ tensor state name to the unsharded flat tensor representing the
94
+ state.
95
+ zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
96
+ dimension tensor state name to its value.
97
+ non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
98
+ name to its value.
99
+ """
100
+
101
+ tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
102
+ zero_dim_tensor_state: Dict[str, torch.Tensor] = field(default_factory=dict)
103
+ non_tensor_state: Dict[str, Any] = field(default_factory=dict)
104
+
105
+
106
+ class _PosDimTensorInfo(NamedTuple):
107
+ """
108
+ Metadata for positive-dimension tensors used internally for
109
+ :meth:`scatter_full_optim_state_dict`.
110
+
111
+ Attributes:
112
+ shape (torch.Size): Sharded tensor shape (which is equal to the
113
+ unsharded tensor shape if the tensor is optimizer state for a
114
+ non-FSDP parameter and is hence not sharded).
115
+ dtype (torch.dtype): Data type of the tensor.
116
+ """
117
+
118
+ shape: torch.Size
119
+ dtype: torch.dtype
120
+
121
+
122
+ class _OptimStateKey(NamedTuple):
123
+ """
124
+ This represents an optimizer state key that may be used commonly across
125
+ ranks. It is based on the unflattened parameter names rather than parameter
126
+ IDs to make it independent of each rank's own optimizer construction.
127
+ """
128
+
129
+ unflat_param_names: Tuple[str, ...]
130
+ is_fsdp_managed: bool
131
+
132
+
133
+ def _unflatten_optim_state(
134
+ fsdp_param_info: FSDPParamInfo,
135
+ flat_param_state: Dict[str, Any],
136
+ to_save: bool,
137
+ shard_state: bool,
138
+ cpu_offload: bool,
139
+ ) -> List[Dict[str, Any]]:
140
+ """
141
+ Unflattens the optimizer state, consisting of the "state" part and the
142
+ "param_groups" part. Unflattening the "state" part involves consolidating
143
+ the state on the target rank and remapping from flattened to unflattened
144
+ parameter IDs, and the "param_groups" part only involves remapping from
145
+ flattened to unflattened parameter IDs.
146
+
147
+ Args:
148
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
149
+ mapping from FQN to original parameter index.
150
+ flat_param_state (Dict[str, Any]): Entry for the flat parameter in the
151
+ "state" part of the optimizer state dict.
152
+ to_save (bool): Whether to save the state on this rank.
153
+
154
+ Returns:
155
+ List[Dict[str, Any]]: A :class:`list` holding the entries in the
156
+ "state" part of the optimizer state dict corresponding to the
157
+ unflattened parameters comprising the flat parameter if on the target
158
+ rank or an empty :class:`list` otherwise. The final optimizer state
159
+ dict will need to map these entries using the proper unflattened
160
+ parameter IDs.
161
+ """
162
+ assert (
163
+ not shard_state or to_save
164
+ ), "If ``shard_state`` is True, ``to_save`` has to be True."
165
+ consolidated_state = _communicate_optim_state(
166
+ fsdp_param_info,
167
+ flat_param_state,
168
+ )
169
+ if to_save:
170
+ unflat_param_state = _unflatten_communicated_optim_state(
171
+ fsdp_param_info,
172
+ consolidated_state,
173
+ shard_state,
174
+ )
175
+ for optim_state in unflat_param_state:
176
+ # We can't use .items() below because we'd run into a concurrent modification error
177
+ if cpu_offload:
178
+ for key in list(optim_state.keys()):
179
+ state = optim_state[key]
180
+ if not isinstance(state, torch.Tensor):
181
+ continue
182
+ optim_state[key] = state.cpu()
183
+ return unflat_param_state
184
+ else:
185
+ return []
186
+
187
+
188
+ def _is_zero_dim_tensor(x: Any) -> bool:
189
+ return torch.is_tensor(x) and x.dim() == 0
190
+
191
+
192
+ def _communicate_optim_state(
193
+ fsdp_param_info: FSDPParamInfo,
194
+ flat_param_state: Dict[str, Any],
195
+ ) -> _ConsolidatedOptimState:
196
+ """
197
+ Communicates the optimizer state for a flat parameter across ranks. All
198
+ ranks will hold the entire non-sharded optimizer state on GPU.
199
+
200
+ If ``N`` is the number of tensor optimizer states in the optimizer state
201
+ dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
202
+ otherwise (where the plus 1 comes from all-gathering the padding per rank).
203
+
204
+ Args:
205
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
206
+ mapping from FQN to original parameter index.
207
+ flat_param_state (Dict[str, Any]): The entry in the "state" part of the
208
+ optimizer state dict corresponding to the flat parameter.
209
+
210
+ Returns:
211
+ ConsolidatedOptimState: Consolidated optimizer state for the target
212
+ flat parameter.
213
+ """
214
+ fsdp_state = fsdp_param_info.state
215
+ flat_param = fsdp_param_info.handle.flat_param
216
+ state = _ConsolidatedOptimState()
217
+ tensor_state, zero_dim_tensor_state, non_tensor_state = (
218
+ state.tensor_state,
219
+ state.zero_dim_tensor_state,
220
+ state.non_tensor_state,
221
+ )
222
+
223
+ for state_name, value in sorted_items(flat_param_state):
224
+ # Positive-dimension tensor state: communicate across ranks
225
+ if torch.is_tensor(value) and value.dim() > 0:
226
+ # If the parameter is not sharded, then neither is the
227
+ # positive-dimension tensor state, so no need to communicate it --
228
+ # we take the target rank's value
229
+ if (
230
+ fsdp_state.world_size == 1
231
+ or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
232
+ ):
233
+ tensor_state[state_name] = value
234
+ continue
235
+ assert (
236
+ fsdp_state.compute_device is not None
237
+ ), "compute_device has not been initialized"
238
+ if value.device.type != fsdp_state.compute_device.type:
239
+ value = value.to(fsdp_state.compute_device)
240
+ # Assume that positive-dimension tensor optimizer state
241
+ # has the same shape as the sharded flat parameter
242
+ buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
243
+ tensor_buffer = value.new_zeros(*buffer_size)
244
+ dist.all_gather_into_tensor(
245
+ tensor_buffer, value, group=fsdp_state.process_group
246
+ )
247
+ fsdp_state._device_handle.synchronize()
248
+ unpadded_numel = cast(
249
+ nn.Parameter, flat_param._unpadded_unsharded_size
250
+ ).numel()
251
+ tensor_state[state_name] = tensor_buffer[:unpadded_numel]
252
+ # Zero-dimension tensor state and non-tensor state: take this rank's
253
+ # value directly
254
+ else:
255
+ if _is_zero_dim_tensor(value):
256
+ zero_dim_tensor_state[state_name] = value.detach().clone()
257
+ else:
258
+ non_tensor_state[state_name] = value
259
+ return state
260
+
261
+
262
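+ # A single-process sketch of the gather-then-trim idea above, assuming
+ # world_size=2 and a flat parameter of 5 elements padded to 6 (3 per rank);
+ # torch.cat stands in for what dist.all_gather_into_tensor would produce:
+ import torch
+ shard_rank0 = torch.tensor([1.0, 2.0, 3.0])
+ shard_rank1 = torch.tensor([4.0, 5.0, 0.0])  # trailing element is padding
+ padded_full = torch.cat([shard_rank0, shard_rank1])
+ unpadded_numel = 5
+ full_tensor_state = padded_full[:unpadded_numel]
+ assert full_tensor_state.tolist() == [1.0, 2.0, 3.0, 4.0, 5.0]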
+ def _unflatten_communicated_optim_state(
263
+ fsdp_param_info: FSDPParamInfo,
264
+ state: _ConsolidatedOptimState,
265
+ shard_state: bool,
266
+ ) -> List[Dict[str, Any]]:
267
+ """
268
+ Unflattens the communicated optimizer state (given by ``tensor_state``,
269
+ ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat
270
+ parameter. This should only be called on the target rank.
271
+
272
+ Args:
273
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
274
+ mapping from FQN to original parameter index.
275
+ state (_ConsolidatedOptimState): Consolidated optimizer state.
276
+
277
+ Returns:
278
+ List[Dict[str, Any]]: A :class:`list` holding the entries in the
279
+ "state" part of the optimizer state dict corresponding to the
280
+ unflattened parameters comprising the flat parameter. The final
281
+ optimizer state dict will need to map these entries using the proper
282
+ unflattened parameter IDs.
283
+ """
284
+ fsdp_state = fsdp_param_info.state
285
+ handle = fsdp_param_info.handle
286
+ flat_param = handle.flat_param
287
+ unflat_param_state: List[Dict[str, Any]] = []
288
+ flat_param_views: Dict[str, Iterator] = {}
289
+ num_unflat_params = flat_param._num_params
290
+ tensor_state, zero_dim_tensor_state, non_tensor_state = (
291
+ state.tensor_state,
292
+ state.zero_dim_tensor_state,
293
+ state.non_tensor_state,
294
+ )
295
+
296
+ for _ in range(num_unflat_params):
297
+ unflat_state_param = {}
298
+ # Add positive-dimension tensor state: unflatten with views
299
+ for state_name, flat_tensor in sorted_items(tensor_state):
300
+ views_generated = state_name in flat_param_views
301
+ if not views_generated:
302
+ views = handle._get_unflat_views(flat_tensor)
303
+ flat_param_views[state_name] = views
304
+ else:
305
+ views = flat_param_views[state_name]
306
+ optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views)
307
+ if shard_state:
308
+ osd_config = fsdp_state._optim_state_dict_config
309
+ if getattr(osd_config, "_use_dtensor", False):
310
+ assert fsdp_state._device_mesh is not None
311
+ optim_state = _ext_chunk_dtensor(
312
+ optim_state,
313
+ fsdp_state.rank,
314
+ fsdp_state._device_mesh,
315
+ fsdp_state._fsdp_extension,
316
+ )
317
+ else:
318
+ assert fsdp_state.process_group is not None
319
+ optim_state = _ext_chunk_tensor(
320
+ optim_state,
321
+ fsdp_state.rank,
322
+ fsdp_state.world_size,
323
+ fsdp_state._device_handle.device_count(),
324
+ fsdp_state.process_group,
325
+ fsdp_state._fsdp_extension,
326
+ )
327
+ unflat_state_param[state_name] = optim_state
328
+
329
+ # Add zero-dimension tensor state: take the target rank's value
330
+ for state_name, zero_dim_tensor in sorted_items(zero_dim_tensor_state):
331
+ unflat_state_param[state_name] = zero_dim_tensor
332
+ # Add non-tensor state: take the target rank's value
333
+ for state_name, non_tensor in sorted_items(non_tensor_state):
334
+ unflat_state_param[state_name] = non_tensor
335
+ unflat_param_state.append(unflat_state_param)
336
+ return unflat_param_state
337
+
338
+
339
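+ # A toy sketch of mapping a full flat state tensor back to per-parameter views,
+ # assuming two original parameters of shapes (2, 3) and (2,); the sizes are
+ # illustrative, and the real code obtains the views via handle._get_unflat_views():
+ import torch
+ full_flat_state = torch.arange(8, dtype=torch.float32)
+ shapes = [(2, 3), (2,)]
+ splits = torch.split(full_flat_state, [6, 2])
+ views = [t.view(s) for t, s in zip(splits, shapes)]
+ assert views[0].shape == (2, 3) and views[1].shape == (2,)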
+ def _broadcast_processed_state(
340
+ fsdp_state: _FSDPState,
341
+ optim_state: Dict[str, Any],
342
+ group: Optional[dist.ProcessGroup],
343
+ ) -> Dict[str, Any]:
344
+ objects: List[Any] = [None]
345
+ if dist.get_rank(group) == 0:
346
+ objects[0] = tree_map_only(
347
+ torch.Tensor,
348
+ lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr]
349
+ optim_state,
350
+ )
351
+ dist.broadcast_object_list(objects, src=0, group=group)
352
+ if dist.get_rank(group) == 0:
353
+ return optim_state
354
+ else:
355
+ return objects[0]
356
+
357
+
358
+ def _broadcast_state(
359
+ fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup]
360
+ ) -> Any:
361
+ if dist.get_rank(group) == 0:
362
+ if not isinstance(state, torch.Tensor) or state.dim() == 0:
363
+ return state
364
+ tensor = state.to(fsdp_state.compute_device)
365
+ else:
366
+ if isinstance(state, torch.Tensor):
367
+ assert state.dim() == 0, (
368
+ "For non-zero ranks, a tensor state should have zero dimension, "
369
+ "but got the state with shape {state.shape()}."
370
+ )
371
+ return state
372
+ elif not isinstance(state, _PosDimTensorInfo):
373
+ return state
374
+ tensor = torch.zeros(
375
+ state.shape, dtype=state.dtype, device=fsdp_state.compute_device
376
+ )
377
+ dist.broadcast(tensor, src=0, group=group)
378
+ return tensor
379
+
380
+
381
+ def _shard_orig_param_state(
382
+ fsdp_param_info: FSDPParamInfo,
383
+ fqn: str,
384
+ optim_state: Dict[str, Any],
385
+ ) -> Dict[str, Any]:
386
+ """
387
+ Shard the optimizer state for the original parameter with the name ``fqn``.
388
+ This API should only be used when ``use_orig_params`` is True.
389
+ """
390
+ if not optim_state:
391
+ return {}
392
+ fsdp_state = fsdp_param_info.state
393
+ flat_param = fsdp_param_info.handle.flat_param
394
+ param_idx = fsdp_param_info.param_indices[fqn]
395
+ shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined]
396
+ optim_state = _gather_state_dict(
397
+ optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device
398
+ )
399
+ if not shard_param_info.in_shard:
400
+ return {}
401
+ # Flatten and shard the state.
402
+ new_optim_state: Dict[str, Any] = {}
403
+ intra_param_start_idx = shard_param_info.intra_param_start_idx
404
+ intra_param_end_idx = shard_param_info.intra_param_end_idx
405
+ for state_name, value in optim_state.items():
406
+ if (
407
+ torch.is_tensor(value)
408
+ and value.dim() > 0
409
+ and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
410
+ ):
411
+ value = value.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone() # type: ignore[operator]
412
+ new_optim_state[state_name] = value
413
+ return new_optim_state
414
+
415
+
416
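+ # A toy sketch of the intra-parameter slicing above, assuming this rank's shard
+ # covers elements 2..5 (inclusive) of a 3x3 parameter's "exp_avg" state:
+ import torch
+ full_exp_avg = torch.arange(9, dtype=torch.float32).reshape(3, 3)
+ intra_param_start_idx, intra_param_end_idx = 2, 5
+ local_state = full_exp_avg.flatten()[intra_param_start_idx : intra_param_end_idx + 1].clone()
+ assert local_state.tolist() == [2.0, 3.0, 4.0, 5.0]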
+ def _flatten_optim_state_dict(
417
+ optim_state_dict: Dict[str, Any],
418
+ model: nn.Module,
419
+ use_orig_params: bool = False,
420
+ optim: Optional[torch.optim.Optimizer] = None,
421
+ rank0_only: bool = False,
422
+ group: Optional[dist.ProcessGroup] = None,
423
+ ) -> Dict[str, Any]:
424
+ """
425
+ Flattens the full optimizer state dict, still keying by unflattened parameter
426
+ names.
427
+
428
+ If ``use_orig_params`` is True, each rank will have all FSDP-managed
429
+ parameters but some of these parameters may be empty due to the sharding.
430
+ For a regular optim.Optimizer, states for those empty parameters will
431
+ not be initialized. So, when aggregating the FQNs across ranks, no assert
432
+ will be raised on a rank even if it does not have all the states -- it is
433
+ valid and FSDP knows how to aggregate them. However, FSDP has to ignore
434
+ handling those parameters that are not managed by FSDP and do not exist on
435
+ the local rank -- they are managed by other parallelisms and FSDP does not
436
+ know how to handle/aggregate them.
437
+
438
+ Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to
439
+ flatten/shard the state. However, NamedOptimizer and KeyedOptimizer require
440
+ all the states even if the corresponding parameters are empty. To this end,
441
+ ``optim`` will be used to to get the initial state of the empty parameters.
442
+ ``optim`` should only be non-None if the ``optim` is KeyedOptimizer or
443
+ NamedOptimizer.
444
+
445
+ Returns:
446
+ Dict[str, Any]: The flattened optimizer state dict.
447
+ """
448
+ SimpleProfiler.reset()
449
+
450
+ unflat_osd = optim_state_dict
451
+ if "state" not in unflat_osd and not rank0_only:
452
+ raise ValueError(
453
+ '`optim_state_dict` must have the key "state" '
454
+ "to be a valid optimizer state dict"
455
+ )
456
+ param_to_fqns = _get_param_to_fqns(model)
457
+ fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
458
+ fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state
459
+
460
+ # Broadcast unflat_osd without non-scalar tensor if rank0_only is True.
461
+ if rank0_only:
462
+ unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group)
463
+
464
+ # Construct the "state" part
465
+ flat_osd_state: Dict[Union[_OptimStateKey, str], Any] = {}
466
+ unflat_osd_state = unflat_osd["state"]
467
+ all_state_keys = set(unflat_osd_state.keys())
468
+
469
+ for param, fqns in param_to_fqns.items():
470
+ fqn = fqns[0]
471
+ if fqn not in unflat_osd_state:
472
+ continue
473
+ all_state_keys.difference_update(fqns)
474
+
475
+ if rank0_only:
476
+ for fqn in fqns:
477
+ if not unflat_osd_state[fqn]:
478
+ continue
479
+ for state_name in unflat_osd_state[fqn].keys():
480
+ unflat_osd_state[fqn][state_name] = _broadcast_state(
481
+ fsdp_state, unflat_osd_state[fqn][state_name], group=group
482
+ )
483
+ fqn = fqns[0]
484
+ if fqn in fqn_to_fsdp_param_info:
485
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
486
+ if use_orig_params:
487
+ with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
488
+ flat_state = _shard_orig_param_state(
489
+ fsdp_param_info,
490
+ fqn,
491
+ unflat_osd_state[fqn],
492
+ )
493
+ else:
494
+ flat_state = _flatten_optim_state(
495
+ fsdp_param_info,
496
+ unflat_osd_state,
497
+ fqns,
498
+ )
499
+ key = _OptimStateKey(tuple(fqns), True)
500
+ # Only include non-empty states, as expected by
501
+ # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer
502
+ # or NamedOptimizer.
503
+ if flat_state:
504
+ flat_osd_state[key] = flat_state
505
+ elif use_orig_params:
506
+ assert (
507
+ len(fqns) == 1
508
+ ), f"use_orig_params is True but there are multiple FQNs, {fqns}."
509
+ if optim is not None: # NamedOptimizer or KeyedOptimizer case.
510
+ state = optim.state.get(param, None) # type: ignore[call-overload]
511
+ if state is not None:
512
+ flat_osd_state[key] = copy.deepcopy(state)
513
+ else:
514
+ warnings.warn(
515
+ f"optim_state[{key}] is not on rank{fsdp_state.rank}."
516
+ )
517
+
518
+ else:
519
+ raise RuntimeError(
520
+ f"The state of {key} is empty. This should happen when "
521
+ "use_orig_params=True."
522
+ )
523
+ else: # do not flatten non-FSDP parameters' states
524
+ assert len(fqns) == 1
525
+ key = _OptimStateKey(tuple(fqns), False)
526
+ flat_osd_state[key] = copy.copy(unflat_osd_state[fqn])
527
+
528
+ if rank0_only:
529
+ for fqn in fqns:
530
+ if not unflat_osd_state[fqn]:
531
+ continue
532
+ for state_name, param_state in list(unflat_osd_state[fqn].items()):
533
+ if fsdp_state.rank > 0:
534
+ # Dereference the tensor so that PyTorch can collect the memory.
535
+ del unflat_osd_state[fqn][state_name]
536
+ else:
537
+ # Move the tensor in the original osd back to CPU so that the
538
+ # original osd is unaffected.
539
+ unflat_osd_state[fqn][state_name] = unflat_osd_state[fqn][
540
+ state_name
541
+ ].cpu()
542
+
543
+ # Handle user-defined state, states that are not associated with parameters.
544
+ for key in all_state_keys:
545
+ user_state = unflat_osd_state[key]
546
+ if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params:
547
+ user_state = _broadcast_state(fsdp_state, user_state, group=group)
548
+ flat_osd_state[key] = copy.copy(user_state)
549
+
550
+ SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ")
551
+ # Construct the "param_groups" part -- copy as is since it will be
552
+ # rekeyed later according to the target rank's optimizer
553
+ # Only copy param_groups if it exists in unflat_osd
554
+ if "param_groups" in unflat_osd:
555
+ flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"])
556
+ return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
557
+ else:
558
+ return {"state": flat_osd_state}
559
+
560
+
561
+ def _flatten_optim_state(
562
+ fsdp_param_info: FSDPParamInfo,
563
+ unflat_osd_state: Dict[str, Dict[str, Any]],
564
+ unflat_param_names: List[str],
565
+ ) -> Dict[str, Any]:
566
+ """
567
+ Flattens the optimizer state in ``full_optim_state_dict`` for a single
568
+ flat parameter in ``fsdp_param_info`` corresponding to the unflattened
569
+ parameter names in ``unflat_param_names``.
570
+
571
+ Args:
572
+ fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a
573
+ mapping from FQN to original parameter index.
574
+ unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
575
+ optimizer state dict corresponding to the unflattened parameters.
576
+ unflat_param_names (List[str]): A :class:`list` of unflattened
577
+ parameter names corresponding to the flat parameter ``flat_param``.
578
+
579
+ Returns:
580
+ Dict[str, Any]: A :class:`dict` mapping state names to their values for
581
+ a particular flat parameter. The sharded optimizer state dict's "state"
582
+ part will map a key to this returned value.
583
+ """
584
+ fsdp_state = fsdp_param_info.state
585
+ handle = fsdp_param_info.handle
586
+ flat_param = handle.flat_param
587
+ num_unflat_params = len(unflat_param_names)
588
+ assert num_unflat_params > 0, (
589
+ "Expects at least one unflattened parameter corresponding to the "
590
+ "flat parameter"
591
+ )
592
+ unflat_param_shapes = flat_param._shapes
593
+ num_unflat_param_shapes = len(unflat_param_shapes)
594
+ assert (
595
+ num_unflat_params == num_unflat_param_shapes
596
+ ), f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
597
+
598
+ # Check if these unflattened parameters have any optimizer state
599
+ has_state = [
600
+ bool(unflat_param_name in unflat_osd_state)
601
+ for unflat_param_name in unflat_param_names
602
+ ]
603
+ # If none of the unflattened parameters comprising this flat parameter have
604
+ # any state, then we do not want an entry in the optimizer state dict
605
+ if not any(has_state):
606
+ return {} # no need to flatten any state
607
+ # There may still be some unflattened parameters with state and some
608
+ # without
609
+ unflat_param_states = [
610
+ _gather_state_dict(
611
+ unflat_osd_state[unflat_param_name],
612
+ pg=fsdp_state.process_group,
613
+ device=fsdp_state.compute_device,
614
+ )
615
+ if unflat_param_name in unflat_osd_state
616
+ else None
617
+ for unflat_param_name in unflat_param_names
618
+ ]
619
+ # Check that the unflattened parameters have the same state names
620
+ state_names = None
621
+ for unflat_param_state in unflat_param_states:
622
+ if unflat_param_state is None:
623
+ continue
624
+ if state_names is None:
625
+ state_names = set(unflat_param_state.keys())
626
+ else:
627
+ if state_names != set(unflat_param_state.keys()):
628
+ raise ValueError(
629
+ "Differing optimizer state names for the unflattened "
630
+ f"parameters: {unflat_param_names}"
631
+ )
632
+ assert state_names is not None
633
+
634
+ # Flatten the state
635
+ flat_state: Dict[str, Any] = {}
636
+ for state_name in state_names:
637
+ state_values = [
638
+ unflat_param_state[state_name] if unflat_param_state is not None else None
639
+ for unflat_param_state in unflat_param_states
640
+ ]
641
+ non_none_state_values = [v for v in state_values if v is not None]
642
+ # If all unflattened parameters have None, this is a None value
643
+ if not non_none_state_values:
644
+ flat_state[state_name] = None
645
+ continue
646
+ are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
647
+ for v in non_none_state_values:
648
+ are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
649
+ are_zero_dim_tensors &= _is_zero_dim_tensor(v)
650
+ are_non_tensors &= not torch.is_tensor(v)
651
+ types = {type(v) for v in non_none_state_values}
652
+ if len(types) != 1 or not (
653
+ are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
654
+ ):
655
+ raise ValueError(
656
+ f"Differing optimizer state types for state {state_name}, "
657
+ f"values {non_none_state_values}, and unflattened parameter "
658
+ f"names {unflat_param_names}"
659
+ )
660
+ if are_pos_dim_tensors:
661
+ flat_tensor = _flatten_tensor_optim_state(
662
+ state_name,
663
+ state_values,
664
+ unflat_param_names,
665
+ unflat_param_shapes,
666
+ handle,
667
+ )
668
+ # Shard the flattened tensor immediately to minimize max memory
669
+ # usage
670
+ if (
671
+ fsdp_state.world_size != 1
672
+ and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD
673
+ ):
674
+ sharded_flat_tensor, _ = FlatParamHandle._get_shard(
675
+ flat_tensor,
676
+ fsdp_state.rank,
677
+ fsdp_state.world_size,
678
+ )
679
+ else:
680
+ sharded_flat_tensor = flat_tensor
681
+ flat_state[state_name] = sharded_flat_tensor
682
+ elif are_zero_dim_tensors:
683
+ flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
684
+ state_name,
685
+ state_values,
686
+ unflat_param_names,
687
+ )
688
+ else:
689
+ assert are_non_tensors
690
+ flat_state[state_name] = _flatten_non_tensor_optim_state(
691
+ state_name,
692
+ state_values,
693
+ unflat_param_names,
694
+ )
695
+
696
+ return flat_state
697
+
698
+
699
+ def _flatten_tensor_optim_state(
700
+ state_name: str,
701
+ pos_dim_tensors: List[torch.Tensor],
702
+ unflat_param_names: List[str],
703
+ unflat_param_shapes: Sequence[torch.Size],
704
+ handle: FlatParamHandle,
705
+ ) -> torch.Tensor:
706
+ """
707
+ Flattens the positive-dimension tensor optimizer state given by the values
708
+ ``pos_dim_tensors`` for the state ``state_name`` for a single flat parameter
709
+ from ``handle`` corresponding to the unflattened parameter names
710
+ ``unflat_param_names`` and unflattened parameter shapes
711
+ ``unflat_param_shapes``. This flattens each unflattened parameter's tensor
712
+ state into one tensor.
713
+
714
+ NOTE: We use zero tensors for any unflattened parameters without state
715
+ since some value is required to fill those entries. This assumes that the
716
+ zero tensor is mathematically equivalent to having no state, which is true
717
+ for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
718
+ optimizers.
719
+
720
+ Args:
721
+ state_name (str): Optimizer state name.
722
+ pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
723
+ optimizer state values for the unflattened parameters corresponding
724
+ to the single flat parameter.
725
+ unflat_param_names (List[str]): A :class:`list` of unflattened
726
+ parameter names corresponding to the single flat parameter.
727
+ unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
728
+ corresponding to the single flat parameter.
729
+ handle (FlatParamHandle): The flat parameter's handle.
730
+
731
+ Returns:
732
+ torch.Tensor: A flat tensor containing the optimizer state
733
+ corresponding to ``state_name`` constructed by concatenating the
734
+ unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
735
+ tensors for any unflattened parameters without the state).
736
+ """
737
+ flat_param = handle.flat_param
738
+ non_none_tensors = [t for t in pos_dim_tensors if t is not None]
739
+ # Check that all are tensors with the same dtype
740
+ dtypes = {t.dtype for t in non_none_tensors}
741
+ if len(dtypes) != 1:
742
+ raise ValueError(
743
+ "All unflattened parameters comprising a single flat "
744
+ "parameter must have positive-dimension tensor state with the "
745
+ f"same dtype but got dtypes {dtypes} for state {state_name} and "
746
+ f"unflattened parameter names {unflat_param_names}"
747
+ )
748
+ dtype = next(iter(dtypes))
749
+ # Check that each tensor state matches its parameter's shape
750
+ for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
751
+ if tensor is None and len(shape) == 0:
752
+ raise ValueError("Flattening a zero-dimension parameter is not supported")
753
+ elif tensor is not None and tensor.shape != shape:
754
+ raise ValueError(
755
+ "Tensor optimizer state does not have same shape as its "
756
+ f"parameter: {tensor.shape} {shape}"
757
+ )
758
+ # Flatten the tensor states: we do not need to add any right-hand-side
759
+ # padding since the flat optimizer state tensor is sharded via
760
+ # `_get_shard()`, which pads the shard as needed (just like for the flat
761
+ # parameter)
762
+ cpu_device = torch.device("cpu")
763
+ tensors_to_flatten = [
764
+ torch.flatten(state_value.to(cpu_device))
765
+ if state_value is not None
766
+ else torch.flatten(
767
+ torch.zeros(
768
+ size=shape,
769
+ dtype=dtype,
770
+ device=cpu_device,
771
+ )
772
+ )
773
+ for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes)
774
+ ]
775
+ flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel)
776
+ flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined]
777
+ assert flat_tensor.shape == flat_param_shape, (
778
+ f"tensor optim state: {flat_tensor.shape} "
779
+ f"flat parameter: {flat_param_shape}"
780
+ )
781
+ return flat_tensor
782
+
783
+
784
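+ # A self-contained sketch of the flatten-then-shard flow above, assuming two
+ # toy unflattened parameters and world_size=2 with no alignment padding; the
+ # real code uses handle.flatten_tensors() and FlatParamHandle._get_shard() instead:
+ import torch
+ exp_avg_weight = torch.arange(6, dtype=torch.float32).reshape(2, 3)
+ exp_avg_bias = torch.arange(2, dtype=torch.float32)
+ flat_state = torch.cat([exp_avg_weight.flatten(), exp_avg_bias.flatten()])
+ world_size = 2
+ shards = torch.chunk(flat_state, world_size)
+ assert flat_state.numel() == 8 and all(s.numel() == 4 for s in shards)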
+ def _flatten_zero_dim_tensor_optim_state(
785
+ state_name: str,
786
+ zero_dim_tensors: List[torch.Tensor],
787
+ unflat_param_names: List[str],
788
+ ) -> torch.Tensor:
789
+ """
790
+ Flattens the zero-dimension tensor optimizer state given by the values
791
+ ``zero_dim_tensors`` for the state ``state_name`` for a single flat
792
+ parameter corresponding to the unflattened parameter names
793
+ ``unflat_param_names`` by enforcing that all tensors are the same and using
794
+ that common value.
795
+
796
+ NOTE: The requirement that the tensors are the same across all unflattened
797
+ parameters comprising the flat parameter is needed to maintain the
798
+ invariant that FSDP performs the same computation as its non-sharded
799
+ equivalent. This means that none of the unflattened parameters can be
800
+ missing this state since imposing a value may differ from having no value.
801
+ For example, for Adam's "step", no value means maximum bias correction,
802
+ while having some positive value means less bias correction.
803
+
804
+ Args:
805
+ state_name (str): Optimizer state name.
806
+ zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
807
+ for the unflattened parameters corresponding to the single
808
+ flat parameter.
809
+ unflat_param_names (List[str]): A :class:`list` of unflattened
810
+ parameter names corresponding to the single flat parameter.
811
+
812
+ Returns:
813
+ torch.Tensor: A zero-dimensional tensor giving the value of the state
814
+ ``state_name`` for all unflattened parameters corresponding to the
815
+ names ``unflat_param_names``.
816
+ """
817
+ non_none_tensors = [t for t in zero_dim_tensors if t is not None]
818
+ # Enforce that all have the same value and dtype
819
+ values_set = {t.item() if t is not None else None for t in zero_dim_tensors}
820
+ dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors}
821
+ if (
822
+ len(non_none_tensors) != len(zero_dim_tensors)
823
+ or len(values_set) != 1
824
+ or len(dtypes) != 1
825
+ ):
826
+ raise ValueError(
827
+ "All unflattened parameters comprising a single flat "
828
+ "parameter must have scalar state with the same value and dtype "
829
+ f"but got values {values_set} and dtypes {dtypes} for state "
830
+ f"{state_name} and unflattened parameter names "
831
+ f"{unflat_param_names}"
832
+ )
833
+ value = next(iter(values_set))
834
+ dtype = next(iter(dtypes))
835
+ return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
836
+
837
+
838
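+ # A small sketch of the scalar-state consensus check above, assuming Adam-style
+ # "step" values gathered for two unflattened parameters (toy values only):
+ import torch
+ step_values = [torch.tensor(10.0), torch.tensor(10.0)]
+ values_set = {t.item() for t in step_values}
+ dtypes = {t.dtype for t in step_values}
+ assert len(values_set) == 1 and len(dtypes) == 1, "all params must share the same step"
+ flat_step = torch.tensor(next(iter(values_set)), dtype=next(iter(dtypes)))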
+ def _flatten_non_tensor_optim_state(
839
+ state_name: str,
840
+ non_tensors: List[Any],
841
+ unflat_param_names: List[str],
842
+ ) -> Any:
843
+ """
844
+ Flattens the non-tensor optimizer state given by the values ``non_tensors``
845
+ for the state ``state_name`` for a single flat parameter corresponding
846
+ to the unflattened parameter names ``unflat_param_names`` by enforcing that
847
+ all values are the same and using that common value.
848
+
849
+ See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
850
+
851
+ Args:
852
+ state_name (str): Optimizer state name.
853
+ non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
854
+ parameters corresponding to the single flat parameter.
855
+ unflat_param_names (List[str]): A :class:`list` of unflattened
856
+ parameter names corresponding to the single flat parameter.
857
+
858
+ Returns:
859
+ Any: A non-tensor giving the value of the state ``state_name`` for all
860
+ unflattened parameters corresponding to the names
861
+ ``unflat_param_names``.
862
+ """
863
+ non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
864
+ # Enforce that all have the same value (same type already checked)
865
+ non_tensor_set = set(non_tensors)
866
+ if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1:
867
+ raise ValueError(
868
+ "All unflattened parameters comprising a single flat "
869
+ "parameter must have scalar state with the same value and dtype "
870
+ f"but got values {non_tensor_set} for state {state_name} and "
871
+ f"unflattened parameter names {unflat_param_names}"
872
+ )
873
+ non_tensor = next(iter(non_tensor_set))
874
+ return non_tensor
875
+
876
+
877
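+ # A toy sketch of the non-tensor consensus check above; the flag name and
+ # values are illustrative:
+ amsgrad_values = [False, False, False]
+ value_set = set(amsgrad_values)
+ assert len(value_set) == 1, "all unflattened params must agree on the value"
+ flat_amsgrad = next(iter(value_set))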
+ def _rekey_sharded_optim_state_dict(
878
+ sharded_osd: Dict[str, Any],
879
+ model: nn.Module,
880
+ optim: torch.optim.Optimizer,
881
+ optim_input: Optional[
882
+ Union[
883
+ List[Dict[str, Any]],
884
+ Iterable[nn.Parameter],
885
+ ]
886
+ ],
887
+ using_optim_input: bool,
888
+ is_named_optimizer: bool = False,
889
+ ) -> Dict[str, Any]:
890
+ """
891
+ Rekeys the optimizer state dict from unflattened parameter names to flat
892
+ parameter IDs according to the calling rank's ``optim``, which may be
893
+ different across ranks. In particular, the unflattened parameter names are
894
+ represented as :class:`_OptimStateKey` s.
895
+ """
896
+ param_to_fqns = _get_param_to_fqns(model)
897
+ flat_param_to_fqn = _get_flat_param_to_fqn(model)
898
+ param_to_param_key: Dict[nn.Parameter, Union[int, str]] = cast(
899
+ Dict[nn.Parameter, Union[int, str]],
900
+ (
901
+ _get_param_to_param_id_from_optim_input(model, optim_input)
902
+ if using_optim_input
903
+ else _get_param_to_param_key(
904
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
905
+ )
906
+ ),
907
+ )
908
+ # All parameter keys in `param_to_param_key` should be in
909
+ # `param_to_fqns` -- strict inequality follows when not all parameters are
910
+ # passed to the optimizer
911
+ assert len(param_to_param_key) <= len(param_to_fqns)
912
+
913
+ unflat_param_names_to_flat_param_key: Dict[
914
+ Tuple[str, ...], Union[int, str]
915
+ ] = {} # for "state"
916
+ unflat_param_name_to_flat_param_key: Dict[
917
+ str, Union[int, str]
918
+ ] = {} # for "param_groups"
919
+ for param, unflat_param_names in param_to_fqns.items():
920
+ if param not in param_to_param_key:
921
+ # This parameter was not passed to the optimizer
922
+ continue
923
+ flat_param_key = param_to_param_key[param]
924
+ unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key
925
+ for unflat_param_name in unflat_param_names:
926
+ unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key
927
+
928
+ sharded_osd_state = sharded_osd["state"]
929
+ rekeyed_osd_state: Dict[Union[str, int], Any] = {}
930
+ for key, param_state in sharded_osd_state.items():
931
+ if isinstance(key, str):
932
+ rekeyed_osd_state[key] = param_state
933
+ continue
934
+ flat_param_key = unflat_param_names_to_flat_param_key.get(
935
+ key.unflat_param_names, key.unflat_param_names
936
+ )
937
+ rekeyed_osd_state[flat_param_key] = param_state
938
+
939
+ # Only process param_groups if it exists in sharded_osd
940
+ if "param_groups" in sharded_osd:
941
+ rekeyed_osd_param_groups: List[Dict[str, Any]] = []
942
+ for unflat_param_group in sharded_osd["param_groups"]:
943
+ flat_param_group = copy.deepcopy(unflat_param_group)
944
+ flat_param_keys = sorted(
945
+ {
946
+ unflat_param_name_to_flat_param_key[unflat_param_name]
947
+ for unflat_param_name in unflat_param_group["params"]
948
+ }
949
+ )
950
+ flat_param_group["params"] = flat_param_keys
951
+ rekeyed_osd_param_groups.append(flat_param_group)
952
+ return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
953
+ else:
954
+ return {"state": rekeyed_osd_state}
955
+
956
+
957
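+ # A minimal sketch of the rekeying step above, using plain FQN tuples in place
+ # of _OptimStateKey and hypothetical parameter names/IDs:
+ unflat_names_to_param_id = {("layer.weight",): 0, ("layer.bias",): 1}
+ sharded_state = {("layer.weight",): {"step": 1}, ("layer.bias",): {"step": 1}}
+ rekeyed_state = {unflat_names_to_param_id[k]: v for k, v in sharded_state.items()}
+ assert sorted(rekeyed_state.keys()) == [0, 1]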
+ def _get_param_id_to_param_from_optim_input(
958
+ model: nn.Module,
959
+ optim_input: Optional[
960
+ Union[
961
+ List[Dict[str, Any]],
962
+ Iterable[nn.Parameter],
963
+ ]
964
+ ] = None,
965
+ ) -> Dict[int, nn.Parameter]:
966
+ """
967
+ Constructs a mapping from parameter IDs to parameters. This may be used
968
+ both for models with ``FlatParameter`` s and without.
969
+
970
+ NOTE: This method is only preserved for backward compatibility. The method
971
+ :meth:`_get_param_key_to_param` is the preferred code path that does not
972
+ rely on ``optim_input``.
973
+
974
+ NOTE: We critically assume that, whether the optimizer input is a list of
975
+ parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
976
+ enumerates the parameter IDs in order. In other words, for a parameter list
977
+ input, the parameter IDs should be in that list order, and for a parameter
978
+ groups input, the parameter IDs should be in order within each parameter
979
+ group and in order across parameter groups.
980
+
981
+ Args:
982
+ model (nn.Module): Model whose parameters are passed into the
983
+ optimizer.
984
+ optim_input (Optional[Union[List[Dict[str, Any]],
985
+ Iterable[nn.Parameter]]]): Input passed into the optimizer
986
+ representing either a :class:`list` of parameter groups or an
987
+ iterable of parameters; if ``None``, then this method assumes the
988
+ input was ``model.parameters()``. (Default: ``None``)
989
+
990
+ Returns:
991
+ List[nn.Parameter]: Mapping from parameter IDs to parameters,
992
+ where the parameter ID is implicitly the index in the :class:`list`.
993
+ """
994
+ # Assume the standard case of passing `model.parameters()` to the optimizer
995
+ # if `optim_input` is not specified
996
+ if optim_input is None:
997
+ return dict(enumerate(model.parameters()))
998
+ try:
999
+ params = cast(List[nn.Parameter], list(optim_input))
1000
+ except TypeError as e:
1001
+ raise TypeError(
1002
+ "Optimizer input should be an iterable of Tensors or dicts, "
1003
+ f"but got {optim_input}"
1004
+ ) from e
1005
+ if len(params) == 0:
1006
+ raise ValueError("Optimizer input should not be empty")
1007
+
1008
+ # Check if the optimizer input represents tensors or parameter groups
1009
+ all_tensors = True
1010
+ all_dicts = True
1011
+ for param in params:
1012
+ all_tensors &= isinstance(param, torch.Tensor)
1013
+ all_dicts &= isinstance(param, dict)
1014
+ if not all_tensors and not all_dicts:
1015
+ raise TypeError("Optimizer input should be an iterable of Tensors or dicts")
1016
+ if all_tensors:
1017
+ return dict(enumerate(params))
1018
+ assert all_dicts
1019
+ param_id_to_param: List[nn.Parameter] = []
1020
+ for param_group in params:
1021
+ has_params_key = "params" in param_group # type: ignore[operator]
1022
+ assert has_params_key, (
1023
+ 'A parameter group should map "params" to a list of the '
1024
+ "parameters in the group"
1025
+ )
1026
+ # Implicitly map `flat_param_id` (current length of the list) to
1027
+ # `param`
1028
+ param_id_to_param.extend(param_group["params"]) # type: ignore[index]
1029
+ return dict(enumerate(param_id_to_param))
1030
+
1031
+
1032
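+ # A small sketch of the ID-ordering assumption above: parameter IDs follow the
+ # list order for a parameter-list input and group-by-group order for parameter
+ # groups; the toy model here is illustrative:
+ import torch.nn as nn
+ toy_model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
+ id_to_param = dict(enumerate(toy_model.parameters()))  # optim_input=None case
+ groups = [{"params": list(m.parameters())} for m in toy_model]
+ id_to_param_grouped = dict(enumerate(p for g in groups for p in g["params"]))
+ assert list(id_to_param.keys()) == list(id_to_param_grouped.keys()) == [0, 1, 2, 3]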
+ def _get_flat_param_to_fqn(model: torch.nn.Module) -> Dict[FlatParameter, str]:
1033
+ """
1034
+ Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes
1035
+ from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical"
1036
+ because ``FlatParameter`` s do not come from the original module but are
1037
+ registered only after FSDP has been applied. This function returns the FSDP-given
1038
+ name for the ``FlatParameter`` (usually module._flat_param) as opposed to the
1039
+ canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``.
1040
+
1041
+ Consequently, this function will only return a non-empty mapping if FSDP was
1042
+ applied with ``use_orig_params=False`` as, otherwise, the original parameters
1043
+ are used within the module and there would be no ``FlatParameter`` s in the module.
1044
+
1045
+ """
1046
+
1047
+ def module_fn(module, prefix, tree_level, flat_param_to_fqn):
1048
+ for param_name, param in _named_parameters_with_duplicates(
1049
+ module, recurse=False
1050
+ ):
1051
+ if not isinstance(param, FlatParameter):
1052
+ continue
1053
+ fqn = clean_tensor_name(prefix + param_name)
1054
+ flat_param_to_fqn[param] = fqn
1055
+
1056
+ def return_fn(flat_param_to_fqn):
1057
+ return flat_param_to_fqn
1058
+
1059
+ flat_param_to_fqn_ret: Dict[FlatParameter, str] = {}
1060
+ return _apply_to_modules(
1061
+ model,
1062
+ module_fn,
1063
+ return_fn,
1064
+ [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
1065
+ flat_param_to_fqn_ret,
1066
+ )
1067
+
1068
+
1069
+ def _get_param_key_to_param(
1070
+ optim: torch.optim.Optimizer,
1071
+ model: Optional[nn.Module] = None,
1072
+ is_named_optimizer: bool = False,
1073
+ param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
1074
+ flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
1075
+ ) -> Dict[Union[int, str], nn.Parameter]:
1076
+ """
1077
+ Constructs a mapping from parameter keys to parameters. For the regular
1078
+ optimizers, the keys are parameter IDs. For NamedOptimizer, the keys
1079
+ are FQNs. This API may be used both for models with ``FlatParameter`` s and
1080
+ without.
1081
+ """
1082
+ clean_fqn_to_curr_fqn: Dict[str, str] = {}
1083
+ if is_named_optimizer:
1084
+ assert (
1085
+ param_to_fqns is not None and flat_param_to_fqn is not None
1086
+ ), "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None."
1087
+ assert model is not None
1088
+ for key, _ in _named_parameters_with_duplicates(model):
1089
+ clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key
1090
+
1091
+ param_key_to_param: Dict[Union[str, int], nn.Parameter] = {}
1092
+ pid = 0
1093
+ for param_group in optim.param_groups:
1094
+ if is_named_optimizer:
1095
+ for param in param_group["params"]:
1096
+ assert flat_param_to_fqn is not None
1097
+ if param in flat_param_to_fqn:
1098
+ # FlatParameter case
1099
+ key = flat_param_to_fqn[param]
1100
+ else:
1101
+ assert param_to_fqns is not None
1102
+ # use_orig_params case
1103
+ assert len(param_to_fqns[param]) == 1
1104
+ key = param_to_fqns[param][0]
1105
+ try:
1106
+ key = clean_fqn_to_curr_fqn[key]
1107
+ except KeyError as e:
1108
+ raise KeyError(
1109
+ f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}."
1110
+ ) from e
1111
+ param_key_to_param[key] = param
1112
+ else:
1113
+ for param in param_group["params"]:
1114
+ param_key_to_param[pid] = param
1115
+ pid += 1
1116
+
1117
+ return param_key_to_param
1118
+
1119
+
1120
+ def _get_param_to_param_key(
1121
+ optim: torch.optim.Optimizer,
1122
+ model: Optional[nn.Module] = None,
1123
+ is_named_optimizer: bool = False,
1124
+ param_to_fqns: Optional[Dict[nn.Parameter, List[str]]] = None,
1125
+ flat_param_to_fqn: Optional[Dict[FlatParameter, str]] = None,
1126
+ ) -> Dict[nn.Parameter, Union[int, str]]:
1127
+ """
1128
+ Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API
1129
+ only supports the case where `optim` is a regular optimizer, not NamedOptimizer.
1130
+ So the parameter keys will be parameter ids.
1131
+ """
1132
+ param_id_to_param = _get_param_key_to_param(
1133
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
1134
+ )
1135
+ return {param: param_id for param_id, param in param_id_to_param.items()}
1136
+
1137
+
1138
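+ # A self-contained sketch of the forward and inverse key mappings above for a
+ # regular (non-named) optimizer; SGD and the toy Linear are illustrative only:
+ import torch
+ import torch.nn as nn
+ toy_model = nn.Linear(3, 3)
+ toy_optim = torch.optim.SGD(toy_model.parameters(), lr=0.1)
+ param_key_to_param = dict(enumerate(p for g in toy_optim.param_groups for p in g["params"]))
+ param_to_param_key = {p: k for k, p in param_key_to_param.items()}
+ assert param_to_param_key[toy_model.weight] == 0
+ assert param_to_param_key[toy_model.bias] == 1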
+ def _get_param_to_param_id_from_optim_input(
1139
+ model: nn.Module,
1140
+ optim_input: Optional[
1141
+ Union[
1142
+ List[Dict[str, Any]],
1143
+ Iterable[nn.Parameter],
1144
+ ]
1145
+ ] = None,
1146
+ ) -> Dict[nn.Parameter, int]:
1147
+ """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`."""
1148
+ param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input)
1149
+ return {param: param_id for param_id, param in param_id_to_param.items()}
1150
+
1151
+
1152
+ def _check_missing_keys_on_rank(
1153
+ r0_optim_state_keys: List[_OptimStateKey],
1154
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[str, int]],
1155
+ param_key_to_param: Dict[Union[str, int], nn.Parameter],
1156
+ group: Optional[dist.ProcessGroup],
1157
+ ) -> None:
1158
+ # Ensure that all ranks have at least the optimizer states needed by
1159
+ # rank 0's optimizer
1160
+ missing_keys: List[_OptimStateKey] = []
1161
+ for r0_optim_state_key in r0_optim_state_keys:
1162
+ if r0_optim_state_key not in optim_state_key_to_param_key:
1163
+ # A parameter from rank 0's optimizer does not exist for this
1164
+ # rank's optimizer
1165
+ missing_keys.append(r0_optim_state_key)
1166
+ continue
1167
+ param_key = optim_state_key_to_param_key[r0_optim_state_key]
1168
+ if isinstance(param_key, int):
1169
+ assert param_key >= 0 and param_key < len(
1170
+ param_key_to_param
1171
+ ), "Check the `param_key_to_param` construction"
1172
+ # We cannot use FSDPState.compute_device as this API is a global view.
1173
+ device = _get_pg_default_device(group)
1174
+ num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device)
1175
+ dist.all_reduce(num_missing, group=group)
1176
+ if num_missing.item() > 0:
1177
+ obj_list = [None for _ in range(dist.get_world_size(group))]
1178
+ dist.all_gather_object(obj_list, missing_keys, group=group)
1179
+ error_msg = (
1180
+ "FSDP currently requires each rank to have at least the "
1181
+ "optimizer states needed by rank 0's optimizer but some ranks "
1182
+ "are missing some of those states"
1183
+ )
1184
+ for rank, keys in enumerate(obj_list):
1185
+ keys = cast(List[_OptimStateKey], keys)
1186
+ if len(keys) > 0:
1187
+ error_msg += (
1188
+ f"\nRank {rank} is missing states for the parameters: "
1189
+ f"{[key.unflat_param_names for key in keys]}"
1190
+ )
1191
+ raise RuntimeError(error_msg)
1192
+
1193
+
1194
+ def _map_param_key_to_optim_keys(
1195
+ optim_state_dict: Dict[str, Any],
1196
+ group: Optional[dist.ProcessGroup],
1197
+ param_key_to_param: Dict[Union[int, str], nn.Parameter],
1198
+ param_to_fqns: Dict[nn.Parameter, List[str]],
1199
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1200
+ merge_keys: bool = False,
1201
+ ) -> Tuple[List[_OptimStateKey], Dict[_OptimStateKey, Union[int, str]]]:
1202
+ """
1203
+ Construct the local mapping between ``_OptimStateKey``s and parameter keys,
1204
+ and gather all the ``_OptimStateKey``s across ranks. If ``merge_keys`` is False, rank0
1205
+ must contain all the ``_OptimStateKey``s; otherwise, an exception will be raised.
1206
+ Note that ``merge_keys`` should equal ``use_orig_params``.
1207
+ """
1208
+ rank = dist.get_rank(group)
1209
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]] = {} # local
1210
+ all_optim_state_keys: List[_OptimStateKey] = []
1211
+
1212
+ for param_key, param in param_key_to_param.items():
1213
+ # Do not include parameters without state to avoid empty mappings
1214
+ # just like in normal `torch.optim.Optimizer.state_dict()`
1215
+ if param_key not in optim_state_dict["state"]:
1216
+ continue
1217
+ fqns = param_to_fqns[param]
1218
+ is_fsdp_managed = isinstance(param, FlatParameter)
1219
+ if is_fsdp_managed:
1220
+ assert fqns[0] in fqn_to_fsdp_param_info, (
1221
+ fqns[0],
1222
+ list(fqn_to_fsdp_param_info.keys()),
1223
+ )
1224
+ is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info
1225
+ optim_state_key = _OptimStateKey(
1226
+ unflat_param_names=tuple(fqns),
1227
+ is_fsdp_managed=is_fsdp_managed,
1228
+ )
1229
+ if rank == 0 or merge_keys:
1230
+ all_optim_state_keys.append(optim_state_key)
1231
+ optim_state_key_to_param_key[optim_state_key] = param_key
1232
+
1233
+ if merge_keys:
1234
+ all_keys: List[List[_OptimStateKey]] = [
1235
+ [] for _ in range(dist.get_world_size(group))
1236
+ ]
1237
+ dist.all_gather_object(all_keys, all_optim_state_keys, group=group)
1238
+ merge_all_optim_state_keys = [
1239
+ key for local_keys in all_keys for key in local_keys
1240
+ ]
1241
+ all_optim_state_keys = sorted(set(merge_all_optim_state_keys))
1242
+ else:
1243
+ key_obj_list: List[Optional[List[_OptimStateKey]]] = (
1244
+ [all_optim_state_keys] if rank == 0 else [None]
1245
+ )
1246
+ dist.broadcast_object_list(key_obj_list, src=0, group=group)
1247
+ assert key_obj_list[0] is not None
1248
+ all_optim_state_keys = key_obj_list[0]
1249
+ _check_missing_keys_on_rank(
1250
+ all_optim_state_keys,
1251
+ optim_state_key_to_param_key,
1252
+ param_key_to_param,
1253
+ group,
1254
+ )
1255
+
1256
+ return all_optim_state_keys, optim_state_key_to_param_key
1257
+
1258
+
1259
+ def _unflatten_param_groups(
1260
+ state_dict: Dict[str, Any],
1261
+ param_key_to_param: Dict[Union[int, str], nn.Parameter],
1262
+ param_to_fqns: Dict[nn.Parameter, List[str]],
1263
+ ) -> List[Dict[str, Any]]:
1264
+ param_groups: List[Dict[str, Any]] = []
1265
+ for flat_param_group in state_dict["param_groups"]:
1266
+ unflat_param_group = copy.deepcopy(flat_param_group)
1267
+ param_group_params = [
1268
+ param_key_to_param[flat_param_key]
1269
+ for flat_param_key in flat_param_group["params"]
1270
+ ]
1271
+ nested_unflat_param_names = [
1272
+ param_to_fqns[param] for param in param_group_params
1273
+ ]
1274
+ unflat_param_group["params"] = [
1275
+ unflat_param_name
1276
+ for unflat_param_names in nested_unflat_param_names
1277
+ for unflat_param_name in unflat_param_names
1278
+ ] # flatten the list of lists
1279
+ param_groups.append(unflat_param_group)
1280
+ return param_groups
1281
+
1282
+
1283
+ def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool:
1284
+ """
1285
+ Returns whether the state_dict is from a NamedOptimizer.
1286
+ This function checks that the keys in the state_dict['state'] are strings
1287
+ (which usually are FQNs) versus integers (which usually refer to param_ids
1288
+ from a vanilla torch.optim.Optimizer).
1289
+ """
1290
+ state = optim_state_dict.get("state", None)
1291
+ if not state:
1292
+ # If we cannot find a state, assume it is not NamedOptimizer as
1293
+ # NamedOptimizer has eager initialization.
1294
+ return False
1295
+ try:
1296
+ key = next(iter(state.keys()))
1297
+ except Exception as e:
1298
+ raise Exception(optim_state_dict) from e # noqa: TRY002
1299
+ return isinstance(key, str)
1300
+
1301
+
1302
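+ # A toy sketch of the key-type check above: NamedOptimizer-style state dicts
+ # key "state" by FQN strings, while vanilla optimizers key by integer parameter
+ # IDs (the dicts below are illustrative):
+ named_style_osd = {"state": {"net.weight": {"step": 1}}, "param_groups": []}
+ vanilla_style_osd = {"state": {0: {"step": 1}}, "param_groups": []}
+ assert isinstance(next(iter(named_style_osd["state"])), str)
+ assert isinstance(next(iter(vanilla_style_osd["state"])), int)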
+ @dataclass
1303
+ class StateInfo:
1304
+ # The key of these dictionaries are the state name, e.g., `exp_avg`.
1305
+ tensors: Dict[str, _PosDimTensorInfo]
1306
+ scalar_tensors: Dict[str, torch.Tensor]
1307
+ non_tensors: Dict[str, Any]
1308
+
1309
+
1310
+ def _allgather_state_info(
1311
+ fsdp_state: _FSDPState,
1312
+ input_states: Dict[str, Any],
1313
+ ) -> List[Dict[str, StateInfo]]:
1314
+ """
1315
+ Given the ``input_states``, allgather StateInfo for each state. The function
1316
+ uses all_gather_object to gather StateInfo so no GPU tensors are sent.
1317
+ """
1318
+
1319
+ processed_state_dict: Dict[str, StateInfo] = {}
1320
+ gathered_state_info: List[Dict[str, StateInfo]] = [
1321
+ {} for _ in range(fsdp_state.world_size)
1322
+ ]
1323
+
1324
+ for fqn, optim_state in input_states.items():
1325
+ # Allgather the scalar tensor state, non-tensor states and tensors metadata.
1326
+ processed_state = StateInfo({}, {}, {})
1327
+ for state_name, value in sorted_items(optim_state):
1328
+ if torch.is_tensor(value):
1329
+ if value.dim() == 0:
1330
+ # Ensure that `step` is on CPU.
1331
+ processed_state.scalar_tensors[state_name] = value.cpu()
1332
+ else:
1333
+ processed_state.tensors[state_name] = _PosDimTensorInfo(
1334
+ value.shape, value.dtype
1335
+ )
1336
+ else:
1337
+ processed_state.non_tensors[state_name] = value
1338
+ processed_state_dict[fqn] = processed_state
1339
+ dist.all_gather_object(
1340
+ gathered_state_info,
1341
+ processed_state_dict,
1342
+ group=fsdp_state.process_group,
1343
+ )
1344
+ return gathered_state_info
1345
+
1346
+
1347
+ def _convert_all_state_info(
1348
+ fsdp_param_info: FSDPParamInfo,
1349
+ gathered_state_info: List[Dict[str, StateInfo]],
1350
+ input_states: Dict[str, Any],
1351
+ output_states: Dict[str, Dict[str, Any]],
1352
+ ) -> Tuple[Optional[torch.dtype], Dict[str, List[Optional[torch.Tensor]]]]:
1353
+ """
1354
+ Given the ``gathered_state_info`` and ``input_states``, this API converts
1355
+ the StateInfo into the original state if the state is not a non-scalar
1356
+ tensor. For a multi-dimensional tensor, the local state will be stored in
1357
+ ``state_buffers`` in the correct order for the later allgather.
1358
+ """
1359
+
1360
+ state_buffers: Dict[str, List[Optional[torch.Tensor]]] = {}
1361
+
1362
+ for fqn, gathered_state in output_states.items():
1363
+ state_info = [s[fqn] for s in gathered_state_info]
1364
+ all_tensor_states = sorted(
1365
+ {n for state in state_info for n in state.tensors.keys()}
1366
+ )
1367
+ empty_ranks: Set[int] = set()
1368
+ dtype: Optional[torch.dtype] = None
1369
+ # First check all the non-scalar states and get the information of
1370
+ # states on each rank.
1371
+ for state_name in all_tensor_states:
1372
+ numels = []
1373
+ _empty_ranks: Set[int] = set()
1374
+ for rank, object_state in enumerate(state_info):
1375
+ numels.append(0)
1376
+ info = object_state.tensors.get(state_name, None)
1377
+ if info is not None:
1378
+ numels[-1] = info.shape.numel()
1379
+ if not dtype:
1380
+ dtype = info.dtype
1381
+ else:
1382
+ assert dtype == info.dtype
1383
+ if numels[-1] == 0:
1384
+ _empty_ranks.add(rank)
1385
+
1386
+ assert not empty_ranks or empty_ranks == _empty_ranks
1387
+ empty_ranks = _empty_ranks
1388
+ if state_name not in state_buffers:
1389
+ state_buffers[state_name] = [
1390
+ None for _ in fsdp_param_info.param_indices
1391
+ ]
1392
+ local_state = input_states[fqn].get(state_name, None)
1393
+ # N.B. We need to move the state to compute_device. The reason is
1394
+ # not yet clear and we need to figure out why the state may be on a
1395
+ # different device.
1396
+ if local_state is not None:
1397
+ local_state = local_state.to(fsdp_param_info.state.compute_device)
1398
+ state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state
1399
+
1400
+ # Restoring the scalar and non-tensor states. If the corresponding
1401
+ # non-scalar states do not exist on the rank, we also skip the scalar
1402
+ # non-tensor states on that rank.
1403
+ for rank, object_state in enumerate(state_info):
1404
+ if rank in empty_ranks:
1405
+ continue
1406
+ for name, non_tensor_value in object_state.non_tensors.items():
1407
+ curr_non_tensor_value = gathered_state.get(name, None)
1408
+ assert (
1409
+ curr_non_tensor_value is None
1410
+ or curr_non_tensor_value == non_tensor_value
1411
+ ), (
1412
+ f"Rank {rank} has different values for {name}: {non_tensor_value}."
1413
+ + f" Other ranks: {curr_non_tensor_value}"
1414
+ )
1415
+ gathered_state[name] = non_tensor_value
1416
+
1417
+ for name, scalar_tensor_value in object_state.scalar_tensors.items():
1418
+ curr_scalar_tensor_value = gathered_state.get(name, None)
1419
+ assert curr_scalar_tensor_value is None or torch.equal(
1420
+ scalar_tensor_value, curr_scalar_tensor_value
1421
+ ), (
1422
+ f"Rank {rank} has different values for {name}: {scalar_tensor_value}."
1423
+ + f" Other ranks: {curr_scalar_tensor_value}"
1424
+ )
1425
+ gathered_state[name] = scalar_tensor_value
1426
+
1427
+ return dtype, state_buffers # type: ignore[possibly-undefined]
1428
+
1429
+
1430
+ def _unflatten_orig_param_states(
1431
+ fsdp_param_info: FSDPParamInfo,
1432
+ output_states: Dict[str, Dict[str, Any]],
1433
+ state_name: str,
1434
+ shard_state: bool,
1435
+ to_save: bool,
1436
+ cpu_offload: bool,
1437
+ ) -> None:
1438
+ """
1439
+ Given an output state dict, ``output_states``, whose keys are FQNs of the
1440
+ original parameters (not FlatParameters nor parameter IDs), and whose values
1441
+ are gathered states, unflatten the states to the original dimensions.
1442
+
1443
+ This function performs the unflattening process in-place.
1444
+ """
1445
+ if not to_save:
1446
+ return
1447
+ flat_param = fsdp_param_info.handle.flat_param
1448
+ fsdp_state = fsdp_param_info.state
1449
+ for fqn, gathered_state in output_states.items():
1450
+ value = gathered_state[state_name]
1451
+ param_idx = fsdp_param_info.param_indices[fqn]
1452
+
1453
+ # TODO: This solution is not general and only applies to the PTD TP solution.
1454
+ if isinstance(value, DTensor):
1455
+ placement = value.placements[0]
1456
+ # If gathered state is a DTensor and its TP placement is not Replicate(), we need to
1457
+ # gather the tensor on its TP dimension before chunking them into DTensor again.
1458
+ if placement != Replicate():
1459
+ placement_dim = placement.dim # type: ignore[attr-defined]
1460
+ value_local = value.redistribute(placements=(Replicate(),))
1461
+ reshape_size = list(flat_param._shapes[param_idx])
1462
+ reshape_size[placement_dim] *= value.device_mesh.size(0)
1463
+ reshape_size = torch.Size(reshape_size)
1464
+ value = value.reshape(reshape_size)
1465
+ # If gathered state is a replicate DTensor, we directly reshape it.
1466
+ else:
1467
+ value = value.reshape(flat_param._shapes[param_idx])
1468
+ else:
1469
+ # If gathered state is a tensor, we directly reshape it into unflatten state.
1470
+ value = value.reshape(flat_param._shapes[param_idx])
1471
+
1472
+ if shard_state:
1473
+ osd_config = fsdp_state._optim_state_dict_config
1474
+ if getattr(osd_config, "_use_dtensor", False):
1475
+ assert fsdp_state._device_mesh is not None
1476
+ value = _ext_chunk_dtensor(
1477
+ value,
1478
+ fsdp_state.rank,
1479
+ fsdp_state._device_mesh,
1480
+ fsdp_state._fsdp_extension,
1481
+ )
1482
+ else:
1483
+ assert fsdp_state.process_group is not None
1484
+ value = _ext_chunk_tensor(
1485
+ value,
1486
+ fsdp_state.rank,
1487
+ fsdp_state.world_size,
1488
+ fsdp_state._device_handle.device_count(),
1489
+ fsdp_state.process_group,
1490
+ fsdp_state._fsdp_extension,
1491
+ )
1492
+ elif not cpu_offload:
1493
+ with SimpleProfiler.profile("clone"):
1494
+ value = value.detach().clone()
1495
+
1496
+ if cpu_offload:
1497
+ with SimpleProfiler.profile(SimpleProfiler.Type.D2H):
1498
+ value = value.cpu()
1499
+ gathered_state[state_name] = value
1500
+
1501
+
1502
+ def _allgather_orig_param_states(
1503
+ fsdp_param_info: FSDPParamInfo,
1504
+ gathered_state_info: List[Dict[str, StateInfo]],
1505
+ input_states: Dict[str, Any],
1506
+ shard_state: bool,
1507
+ to_save: bool,
1508
+ cpu_offload: bool,
1509
+ ) -> Dict[str, Dict[str, Any]]:
1510
+ """
1511
+ Given the ``gathered_state_info`` and ``input_states``, the API allgathers
1512
+ all tensor states and restores non-tensor states from ``gathered_state_info``.
1513
+ """
1514
+ fsdp_state = fsdp_param_info.state
1515
+ if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL:
1516
+ logger.info(
1517
+ "Memory Summary before calling to _allgather_orig_param_states %s",
1518
+ fsdp_state._device_handle.memory_summary(),
1519
+ )
1520
+
1521
+ output_states: Dict[str, Dict[str, Any]] = {fqn: {} for fqn in input_states.keys()}
1522
+
1523
+ dtype, state_buffers = _convert_all_state_info(
1524
+ fsdp_param_info, gathered_state_info, input_states, output_states
1525
+ )
1526
+
1527
+ if len(state_buffers) == 0:
1528
+ return output_states
1529
+
1530
+ has_state_params: List[bool] = [
1531
+ True if fqn in output_states else False
1532
+ for fqn, idx in fsdp_param_info.param_indices.items()
1533
+ ]
1534
+
1535
+ # Loop through the ``state_buffers`` and construct the flattened, concatenated,
1536
+ # sharded states. The constructed state will be the same size as
1537
+ # flat_param (also sharded).
1538
+ # Then we perform an allgather_into_tensor to get the full flat_param state.
1539
+ # The full flat_param state is the concatenation of multiple states in
1540
+ # the order of flat_param._fqns.
1541
+ # The final step is to split the flat_param state into original param states
1542
+ # and return the result.
1543
+ flat_param = fsdp_param_info.handle.flat_param
1544
+ empty_func = functools.partial(
1545
+ torch.empty, dtype=dtype, device=fsdp_state.compute_device
1546
+ )
1547
+ gathered_tensor = empty_func(flat_param._padded_unsharded_size)
1548
+ # Synchronize can be slow but this will be easier for us to debug.
1549
+ fsdp_state._device_handle.synchronize()
1550
+ for state_name, buffers in state_buffers.items():
1551
+ local_buffers: List[torch.Tensor] = []
1552
+ begin = fsdp_state.rank * flat_param._sharded_size.numel()
1553
+ # End is inclusive.
1554
+ end = begin + flat_param._sharded_size.numel() - 1
1555
+ # param_idx corresponds to the parameter index in the FlatParameter.
1556
+ mem_offset, param_idx = 0, 0
1557
+ for numel, is_padding in zip(
1558
+ flat_param._numels_with_padding, flat_param._is_padding_mask
1559
+ ):
1560
+ frozen_and_no_state = not is_padding and (
1561
+ not fsdp_param_info.param_requires_grad[param_idx]
1562
+ and not has_state_params[param_idx]
1563
+ )
1564
+
1565
+ if is_padding or frozen_and_no_state:
1566
+ # This memory range is a padding or the param is frozen and does
1567
+ # not require gradient. For the later case, we treat it as a
1568
+ # padding and add empty values to the local_buffers.
1569
+
1570
+ padding_begin, padding_end = mem_offset, mem_offset + numel - 1
1571
+ if padding_begin <= begin <= padding_end:
1572
+ # The range is an align padding before the first parameter in
1573
+ # the shard. The shard includes parts of this align padding.
1574
+ padding_len = (
1575
+ padding_end - begin + 1
1576
+ if end >= padding_end
1577
+ else end - begin + 1
1578
+ )
1579
+ elif padding_begin <= end <= padding_end:
1580
+ # The range is an align padding after the last parameter in
1581
+ # the shard. The shard includes parts of this align padding.
1582
+ padding_len = (
1583
+ end - padding_begin + 1
1584
+ if begin <= padding_begin
1585
+ else end - begin + 1
1586
+ )
1587
+ elif begin < padding_begin <= padding_end < end:
1588
+ # The range is an align padding that is completely in the
1589
+ # shard.
1590
+ padding_len = numel
1591
+ else:
1592
+ padding_len = 0
1593
+ if padding_len:
1594
+ local_buffers.append(empty_func(padding_len))
1595
+
1596
+ if not is_padding:
1597
+ # This memory range is a parameter in FlatParameter. So there
1598
+ # should be a corresponding state in the optimizer unless the
1599
+ # parameter is frozen, which we treat as padding above.
1600
+
1601
+ # We need to check if this rank owns the buffer. If this is None:
1602
+ # 1.) the rank does not own any part of the original parameter.
1603
+ # As a result, there is no corresponding optimizer state on
1604
+ # the rank as well.
1605
+ # 2.) the parameter is frozen AND there is no optimizer state for the
1606
+ # parameter. If a parameter is frozen, there can still be
1607
+ # optimizer state if the parameter was not frozen in the
1608
+ # previous steps.
1609
+ if buffers[param_idx] is not None:
1610
+ local_buffers.append(cast(torch.Tensor, buffers[param_idx]))
1611
+ param_idx += 1
1612
+
1613
+ mem_offset += numel
1614
+
1615
+ shard_numel_padded = flat_param._sharded_size.numel() - (
1616
+ sum(t.numel() for t in local_buffers)
1617
+ )
1618
+
1619
+ assert flat_param._shard_numel_padded == shard_numel_padded, (
1620
+ "Manually calculated _shard_numel_padded is incorrect. "
1621
+ f"_shard_numel_padded={flat_param._shard_numel_padded}, "
1622
+ f"shard_numel_padded={shard_numel_padded}, "
1623
+ f"_sharded_size.numel={flat_param._sharded_size.numel()}, "
1624
+ f"_numels_with_padding={flat_param._numels_with_padding}, "
1625
+ f"begin={begin}, end={end},"
1626
+ )
1627
+ if shard_numel_padded > 0:
1628
+ # Add right-handed padding.
1629
+ local_buffers.append(empty_func(shard_numel_padded))
1630
+ local_shard = torch.cat(local_buffers)
1631
+ assert local_shard.numel() * fsdp_state.world_size == gathered_tensor.numel(), (
1632
+ "The size of the local shard times the world size should equal the "
1633
+ "gathered tensor size. The inconsistency may come from a bug in "
1634
+ "FlatParameter's metadata or the reconstruction logic in optimizer "
1635
+ "state dict."
1636
+ )
1637
+ fsdp_state._device_handle.synchronize()
1638
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
1639
+ dist.all_gather_into_tensor(
1640
+ gathered_tensor, local_shard, group=fsdp_state.process_group
1641
+ )
1642
+ # Synchronizing can be slow, but it makes this easier for us to debug.
1643
+ fsdp_state._device_handle.synchronize()
1644
+
1645
+ unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()]
1646
+ flat_param_handle = fsdp_param_info.handle
1647
+ orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor)
1648
+ assert len(orig_states) == len(fsdp_param_info.param_indices), (
1649
+ "The number of parameters from FlatParameter is not consistent with "
1650
+ "the number of states used by optimizer state dict reconstruction "
1651
+ "logic."
1652
+ )
1653
+ for fqn, idx in fsdp_param_info.param_indices.items():
1654
+ if fsdp_param_info.param_requires_grad[idx] or fqn in output_states:
1655
+ output_states[fqn][state_name] = orig_states[idx]
1656
+
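+ # Finalize the per-parameter states via ``_unflatten_orig_param_states``
+ # (which applies the ``shard_state``/``cpu_offload`` options) before the
+ # gathered buffer is freed below.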
1657
+ _unflatten_orig_param_states(
1658
+ fsdp_param_info,
1659
+ output_states,
1660
+ state_name,
1661
+ shard_state,
1662
+ to_save,
1663
+ cpu_offload,
1664
+ )
1665
+
1666
+ del gathered_tensor
1667
+ return output_states
1668
+
1669
+
1670
+ def _gather_all_orig_param_state(
1671
+ fsdp_param_info: FSDPParamInfo,
1672
+ input_states: Dict[str, Any],
1673
+ shard_state: bool,
1674
+ to_save: bool,
1675
+ cpu_offload: bool,
1676
+ ) -> Dict[str, Any]:
1677
+ """
1678
+ Given an optimizer state dict, ``input_states``, whose keys are FQNs of the
1679
+ original parameters (not FlatParameters nor parameter IDs), gather all the
1680
+ states and unflatten them to the original dimensions. Note that all the
1681
+ params referred to by ``input_states`` must be managed by FSDP.
1682
+ """
1683
+ fsdp_state = fsdp_param_info.state
1684
+ if (
1685
+ fsdp_state.world_size == 1
1686
+ or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
1687
+ ):
1688
+ return input_states if to_save else {}
1689
+
1690
+ with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
1691
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):
1692
+ gathered_state_info = _allgather_state_info(fsdp_state, input_states)
1693
+ output_states = _allgather_orig_param_states(
1694
+ fsdp_param_info,
1695
+ gathered_state_info,
1696
+ input_states,
1697
+ shard_state,
1698
+ to_save,
1699
+ cpu_offload,
1700
+ )
1701
+ if to_save:
1702
+ for key, idx in fsdp_param_info.param_indices.items():
1703
+ if key in output_states:
1704
+ continue
1705
+ if not fsdp_param_info.param_requires_grad[idx]:
1706
+ continue
1707
+
1708
+ raise RuntimeError(
1709
+ f"{key} is not in the output state. "
1710
+ "The FSDPParamInfo has the param keys "
1711
+ f"{sorted(fsdp_param_info.param_indices.keys())} while "
1712
+ "the output_states has the param keys "
1713
+ f"{sorted(output_states.keys())}."
1714
+ )
1715
+ return output_states
1716
+ else:
1717
+ return {}
1718
+
1719
+
1720
+ def _convert_state_with_orig_params(
1721
+ all_optim_state_keys: List[_OptimStateKey],
1722
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
1723
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1724
+ optim_state_dict: Dict[Union[str, int], Any],
1725
+ to_save: bool,
1726
+ shard_state: bool,
1727
+ cpu_offload: bool = True,
1728
+ ) -> Dict[str, Any]:
1729
+ fsdp_osd_state: Dict[str, Any] = {}
1730
+ # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo
1731
+ # usually corresponds to multiple parameters. We cannot use FSDPParamInfo
1732
+ # as the key because FSDPParamInfo is not hashable. As a result, we fall back
1733
+ # to `id(FSDPParamInfo)`, which is an integer.
1734
+ all_states: Dict[int, Dict[str, Any]] = {}
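+ # ``all_states`` maps ``id(FSDPParamInfo)`` to ``{fqn: optimizer state}`` so
+ # that the batched gather below runs once per ``FlatParameter`` rather than
+ # once per original parameter.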
1735
+ # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
1736
+ # across ranks
1737
+ for optim_state_key in all_optim_state_keys:
1738
+ param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
1739
+ optim_state_key, None
1740
+ )
1741
+
1742
+ if param_key is None and not optim_state_key.is_fsdp_managed:
1743
+ continue
1744
+
1745
+ if optim_state_key.is_fsdp_managed:
1746
+ fqn = optim_state_key.unflat_param_names[0]
1747
+ fsdp_param_info = fqn_to_fsdp_param_info.get(fqn, None)
1748
+ if fsdp_param_info is None:
1749
+ # This can happen if not all FSDP instances have all the
1750
+ # parameters. This can happen with FSDP + some MPMD style
1751
+ # parallelism.
1752
+
1753
+ # TODO: it is unclear if we need to do the same check with
1754
+ # non-FSDP managed keys.
1755
+ continue
1756
+ state = {} if param_key is None else optim_state_dict[param_key]
1757
+ if id(fsdp_param_info) not in all_states:
1758
+ all_states[id(fsdp_param_info)] = {}
1759
+ all_states[id(fsdp_param_info)][fqn] = state
1760
+
1761
+ elif to_save:
1762
+ assert len(optim_state_key.unflat_param_names) == 1
1763
+ unflat_param_name = optim_state_key.unflat_param_names[0]
1764
+ with SimpleProfiler.profile("none_fsdp_managed_copy"):
1765
+ param_key = cast(Union[str, int], param_key)
1766
+ fsdp_osd_state[unflat_param_name] = copy.copy(
1767
+ optim_state_dict[param_key]
1768
+ )
1769
+ if cpu_offload:
1770
+ for state_name, value in sorted_items(
1771
+ fsdp_osd_state[unflat_param_name]
1772
+ ):
1773
+ if not torch.is_tensor(value):
1774
+ continue
1775
+ fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
1776
+
1777
+ # Instead of gathering the state of each parameter individually, we perform
1778
+ # the gathering all at once to speed up the process.
1779
+ for _all_states in all_states.values():
1780
+ fqn = next(iter(_all_states.keys()))
1781
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
1782
+ assert len(fsdp_param_info.param_requires_grad) > 0, (
1783
+ "With use_orig_params, FSDPParamInfo should have requires_grad "
1784
+ "information. However, the length is zero."
1785
+ )
1786
+ for key, idx in fsdp_param_info.param_indices.items():
1787
+ if key in _all_states:
1788
+ continue
1789
+ if not fsdp_param_info.param_requires_grad[idx]:
1790
+ continue
1791
+ raise RuntimeError(
1792
+ f"{key} is not in the optimizer state. "
1793
+ "The FSDPParamInfo has the param keys "
1794
+ f"{sorted(fsdp_param_info.param_indices.keys())} while "
1795
+ "the optimizer has the param keys "
1796
+ f"{sorted(_all_states.keys())}."
1797
+ )
1798
+ fsdp_osd_state.update(
1799
+ _gather_all_orig_param_state(
1800
+ fsdp_param_info,
1801
+ _all_states,
1802
+ shard_state,
1803
+ to_save,
1804
+ cpu_offload,
1805
+ )
1806
+ )
1807
+
1808
+ return fsdp_osd_state
1809
+
1810
+
1811
+ def _convert_state_with_flat_params(
1812
+ all_optim_state_keys: List[_OptimStateKey],
1813
+ optim_state_key_to_param_key: Dict[_OptimStateKey, Union[int, str]],
1814
+ fqn_to_fsdp_param_info: Dict[str, FSDPParamInfo],
1815
+ optim_state_dict: Dict[Union[str, int], Any],
1816
+ to_save: bool,
1817
+ shard_state: bool,
1818
+ cpu_offload: bool = True,
1819
+ ) -> Dict[str, Any]:
1820
+ fsdp_osd_state: Dict[str, Any] = {}
1821
+ # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
1822
+ # across ranks
1823
+ for optim_state_key in all_optim_state_keys:
1824
+ param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
1825
+ optim_state_key, None
1826
+ )
1827
+
1828
+ assert param_key is not None, (
1829
+ "If use_orig_params is False, we must be able to find the "
1830
+ f"corresponding param id. {optim_state_key} {param_key}"
1831
+ )
1832
+
1833
+ if optim_state_key.is_fsdp_managed:
1834
+ # If there are multiple unflat_param_names (not use_orig_params),
1835
+ # they share the same FSDPParamInfo. So the first unflat_param_name
1836
+ # is sufficient to fetch the FSDPParamInfo.
1837
+ fqn = optim_state_key.unflat_param_names[0]
1838
+ fsdp_param_info = fqn_to_fsdp_param_info[fqn]
1839
+ unflat_state = _unflatten_optim_state(
1840
+ fsdp_param_info,
1841
+ optim_state_dict[param_key],
1842
+ to_save,
1843
+ shard_state,
1844
+ cpu_offload,
1845
+ )
1846
+ if to_save:
1847
+ assert len(unflat_state) == len(optim_state_key.unflat_param_names)
1848
+ for unflat_param_name, unflat_param_state in zip(
1849
+ optim_state_key.unflat_param_names,
1850
+ unflat_state,
1851
+ ):
1852
+ fsdp_osd_state[unflat_param_name] = unflat_param_state
1853
+ elif to_save:
1854
+ assert len(optim_state_key.unflat_param_names) == 1
1855
+ unflat_param_name = optim_state_key.unflat_param_names[0]
1856
+ fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key])
1857
+ if cpu_offload:
1858
+ for state_name, value in sorted_items(
1859
+ fsdp_osd_state[unflat_param_name]
1860
+ ):
1861
+ if not torch.is_tensor(value):
1862
+ continue
1863
+ fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
1864
+
1865
+ return fsdp_osd_state
1866
+
1867
+
1868
+ @torch.no_grad()
1869
+ def _optim_state_dict(
1870
+ model: nn.Module,
1871
+ optim: torch.optim.Optimizer,
1872
+ optim_state_dict: Dict[str, Any],
1873
+ optim_input: Optional[
1874
+ Union[
1875
+ List[Dict[str, Any]],
1876
+ Iterable[nn.Parameter],
1877
+ ]
1878
+ ],
1879
+ rank0_only: bool,
1880
+ shard_state: bool,
1881
+ group: Optional[dist.ProcessGroup],
1882
+ using_optim_input: bool,
1883
+ use_orig_params: bool = False,
1884
+ cpu_offload: bool = True,
1885
+ ) -> Dict[str, Any]:
1886
+ """
1887
+ Consolidates the optimizer state and returns it as a :class:`dict`
1888
+ following the convention of :meth:`torch.optim.Optimizer.state_dict`,
1889
+ i.e. with keys ``"state"`` and ``"param_groups"``.
1890
+ The flat parameters in ``FSDP`` modules contained in ``model`` are mapped
1891
+ back to their unflattened parameters.
1892
+
1893
+ Parameter keys are not well-defined. For a regular optimizer, the optimizer
1894
+ state_dict contains a mapping from parameter IDs to parameter states.
1895
+ Parameter IDs are the order of parameters in ``optim.param_groups()`` across
1896
+ all the groups. This API also allows the user to pass ``optim_input`` for the
1897
+ mapping between parameters and parameter IDs. Using ``optim_input`` is being
1898
+ deprecated.
1899
+
1900
+ If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not
1901
+ contain parameter IDs mapping but a mapping from parameter FQNs to parameter
1902
+ states. This API finds the mapping from FQNs to parameters if the optimizer
1903
+ is a ``NamedOptimizer``.
1904
+
1905
+ If ``use_orig_params`` is True, each rank will have all FSDP-managed
1906
+ parameters but some of these parameters may be empty due to the sharding.
1907
+ For a regular optim.Optimizer, states for those empty parameters will
1908
+ not be initialized. So, when aggregating the FQNs across ranks, no assert
1909
+ will be raised on a rank even if it does not have all the states -- it is
1910
+ valid and FSDP knows how to aggregate them. However, FSDP has to ignore
1911
+ handling those parameters that are not managed by FSDP and do not exist on
1912
+ the local rank -- those are managed by other parallelisms and FSDP does not
1913
+ know how to handle/aggregate them.
1914
+
1915
+ Args:
1916
+ model (nn.Module): Root module (which may or may not be a
1917
+ :class:`FullyShardedDataParallel` instance) whose parameters
1918
+ were passed into the optimizer ``optim``.
1919
+ optim (torch.optim.Optimizer): Optimizer for ``model`` 's
1920
+ parameters.
1921
+ rank0_only (bool): If ``True``, saves the populated :class:`dict`
1922
+ only on rank 0; if ``False``, saves it on all ranks. (Default:
1923
+ ``True``)
1924
+ shard_state (bool): If ``True``, shard and distribute all
1925
+ non-zero-dimension states.
1926
+
1927
+ Returns:
1928
+ Dict[str, Any]: A :class:`dict` containing the optimizer state for
1929
+ ``model`` 's original unflattened parameters and including keys
1930
+ "state" and "param_groups" following the convention of
1931
+ :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``,
1932
+ then nonzero ranks return an empty :class:`dict`.
1933
+ """
1934
+ SimpleProfiler.reset()
1935
+ cm = ExitStack()
1936
+ cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL))
1937
+ _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model))
1938
+ to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state
1939
+
1940
+ with SimpleProfiler.profile("preprocessing"):
1941
+ param_to_fqns = _get_param_to_fqns(model)
1942
+ flat_param_to_fqn = _get_flat_param_to_fqn(model)
1943
+ is_named_optimizer = _is_named_optimizer(optim_state_dict)
1944
+
1945
+ param_key_to_param = cast(
1946
+ Dict[Union[int, str], nn.Parameter],
1947
+ (
1948
+ _get_param_id_to_param_from_optim_input(model, optim_input)
1949
+ if using_optim_input
1950
+ else _get_param_key_to_param(
1951
+ optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
1952
+ )
1953
+ ),
1954
+ )
1955
+ fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)
1956
+
1957
+ with SimpleProfiler.profile("preprocessing_with_comm"):
1958
+ (
1959
+ all_optim_state_keys,
1960
+ optim_state_key_to_param_key,
1961
+ ) = _map_param_key_to_optim_keys(
1962
+ optim_state_dict,
1963
+ group,
1964
+ param_key_to_param,
1965
+ param_to_fqns,
1966
+ fqn_to_fsdp_param_info,
1967
+ merge_keys=use_orig_params,
1968
+ )
1969
+
1970
+ with SimpleProfiler.profile("state_converting"):
1971
+ convert_fn = (
1972
+ _convert_state_with_orig_params
1973
+ if use_orig_params
1974
+ else _convert_state_with_flat_params
1975
+ )
1976
+ fsdp_osd_state = convert_fn(
1977
+ all_optim_state_keys,
1978
+ optim_state_key_to_param_key,
1979
+ fqn_to_fsdp_param_info,
1980
+ optim_state_dict["state"],
1981
+ to_save,
1982
+ shard_state,
1983
+ cpu_offload,
1984
+ )
1985
+
1986
+ # At this point, communication is complete and ranks can return early if nothing
1987
+ # will be saved on that rank.
1988
+ if not to_save:
1989
+ return {}
1990
+
1991
+ fsdp_osd: Dict[str, Any] = {"state": fsdp_osd_state}
1992
+
1993
+ flat_param_fqns = set(flat_param_to_fqn.values())
1994
+ for key, value in optim_state_dict["state"].items():
1995
+ if key in fsdp_osd_state:
1996
+ continue
1997
+ if key in flat_param_fqns:
1998
+ continue
1999
+ if key in param_key_to_param:
2000
+ continue
2001
+ # This key is not recognized by FSDP. It may be a user-defined state
2002
+ # or some parameters state that FSDP is unable to map from
2003
+ # ``optim.param_groups``.
2004
+ warnings.warn(
2005
+ f"Found an optim state, {key}, that FSDP cannot process. FSDP "
2006
+ "will directly copy everything to the returned state_dict. In "
2007
+ "most cases, this is a user-defined state that is not "
2008
+ "associated with any particular parameter. Another possible "
2009
+ "case is that this state is managed by TorchRec. Otherwise, there may "
2010
+ "be a mismatch with the assumptions this optim_state_dict mode makes."
2011
+ )
2012
+ fsdp_osd_state[key] = value
2013
+
2014
+ if "param_groups" in optim_state_dict:
2015
+ fsdp_osd["param_groups"] = _unflatten_param_groups(
2016
+ optim_state_dict, param_key_to_param, param_to_fqns
2017
+ )
2018
+
2019
+ cm.close()
2020
+ SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ")
2021
+
2022
+ return fsdp_osd
2023
+
2024
+
2025
+ def _get_fqn_to_fsdp_param_info(model: nn.Module) -> Dict[str, FSDPParamInfo]:
2026
+ """
2027
+ Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo``
2028
+ if the param is managed by FSDP. Shared parameters, or original parameters that
2029
+ are shared across multiple nn.Modules, are required to belong to one and only
2030
+ one FSDP instance and thus correspond to one ``FlatParameter``. Within the one
2031
+ ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared
2032
+ parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters.
2033
+ """
2034
+
2035
+ def module_fn(module, prefix, tree_level, fqn_to_param_info):
2036
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
2037
+ if fsdp_state is None:
2038
+ return
2039
+ _lazy_init(fsdp_state, module)
2040
+ handle = _module_handle(fsdp_state, module)
2041
+ if not handle:
2042
+ return
2043
+ flat_param = handle.flat_param
2044
+ fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, [])
2045
+ # NOTE: `idx` indexes into the data structures *without* padding
2046
+ # elements
2047
+ for idx, local_fqn in enumerate(flat_param._fqns):
2048
+ fqn = clean_tensor_name(prefix + local_fqn)
2049
+ if fqn in fqn_to_param_info:
2050
+ assert fqn_to_param_info[fqn].handle.flat_param is flat_param, fqn
2051
+ fqn_to_param_info[fqn] = fsdp_param_info
2052
+ fsdp_param_info.param_indices[fqn] = idx
2053
+ if flat_param._params is not None:
2054
+ fsdp_param_info.param_requires_grad.append(
2055
+ flat_param._params[idx].requires_grad
2056
+ )
2057
+
2058
+ def return_fn(fqn_to_param_info):
2059
+ return fqn_to_param_info
2060
+
2061
+ fqn_to_param_info: Dict[str, FSDPParamInfo] = {}
2062
+ # FlatParameter._fqns stores the local fqn, starting from the root of the
2063
+ # FSDP. Using _apply_to_modules() with model (may not be the FSDP root
2064
+ # module) allows us to construct the global fqn.
2065
+ return _apply_to_modules(
2066
+ model,
2067
+ module_fn,
2068
+ return_fn,
2069
+ [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
2070
+ fqn_to_param_info,
2071
+ )
2072
+
2073
+
2074
+ @no_type_check
2075
+ def _set_optim_use_dtensor(
2076
+ fsdp_state: _FSDPState,
2077
+ state_dict_settings: StateDictSettings,
2078
+ ) -> None:
2079
+ # If device_mesh is passed in when initializing FSDP, we automatically set the
2080
+ # _use_dtensor flag to True for ShardedOptimStateDictConfig() if state_dict_type
2081
+ # is set to SHARDED_STATE_DICT.
2082
+ if getattr(fsdp_state, "_device_mesh", None):
2083
+ state_dict_type = state_dict_settings.state_dict_type
2084
+ if state_dict_type == StateDictType.LOCAL_STATE_DICT:
2085
+ raise RuntimeError(
2086
+ "Found state_dict_type LOCAL_STATE_DICT.",
2087
+ "DeviceMesh is not compatible with LOCAL_STATE_DICT.",
2088
+ "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
2089
+ )
2090
+ else:
2091
+ state_dict_settings.optim_state_dict_config._use_dtensor = True
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_runtime_utils.py ADDED
@@ -0,0 +1,1638 @@
1
+ # mypy: allow-untyped-defs
2
+ import functools
3
+ import logging
4
+ from enum import auto, Enum
5
+ from typing import Any, Callable, Dict, List, no_type_check, Optional, Set, Tuple
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from torch.autograd import Variable
13
+ from torch.autograd.graph import register_multi_grad_hook
14
+ from torch.distributed.algorithms._comm_hooks import LOW_PRECISION_HOOKS
15
+ from torch.distributed.fsdp._common_utils import (
16
+ _assert_in_training_states,
17
+ _FSDPState,
18
+ _get_module_fsdp_state,
19
+ _is_composable,
20
+ _log_post_backward_hook,
21
+ _no_dispatch_record_stream,
22
+ clean_tensor_name,
23
+ TrainingState,
24
+ )
25
+ from torch.distributed.fsdp._flat_param import (
26
+ FlatParameter,
27
+ FlatParamHandle,
28
+ HandleShardingStrategy,
29
+ HandleTrainingState,
30
+ RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES,
31
+ )
32
+ from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES
33
+ from torch.distributed.fsdp.api import BackwardPrefetch
34
+ from torch.distributed.utils import (
35
+ _apply_to_tensors,
36
+ _cast_forward_inputs,
37
+ _p_assert,
38
+ _to_kwargs,
39
+ )
40
+ from torch.utils import _pytree as pytree
41
+
42
+
43
+ logger = logging.getLogger(__name__)
44
+
45
+ # Do not include "process_group" to enable hybrid shard and MoE cases
46
+ HOMOGENEOUS_ATTR_NAMES = (
47
+ "_use_orig_params",
48
+ "limit_all_gathers",
49
+ "_use_full_prec_in_eval",
50
+ )
51
+
52
+
53
+ class _PrefetchMode(Enum):
54
+ BACKWARD = auto()
55
+ FORWARD = auto()
56
+
57
+
58
+ def _get_fsdp_root_states_with_modules(
59
+ module: nn.Module,
60
+ ) -> Tuple[List[_FSDPState], List[nn.Module]]:
61
+ """
62
+ Returns a tuple containing:
63
+ 1. A list of the root ``_FSDPState`` instances in the module tree rooted at
64
+ ``module`` without any duplicates and following the ``module.modules()``
65
+ traversal order (which is assumed to be depth-first).
66
+ 2. A corresponding list of the root modules owning the states in the first
67
+ list.
68
+
69
+ This is similar to :func:`_get_fsdp_states_with_modules` except that we
70
+ must call :func:`_is_fsdp_root` to force a lazy initialization to determine
71
+ the FSDP root in case lazy initialization has not yet happened.
72
+ """
73
+ fsdp_root_states: List[_FSDPState] = []
74
+ fsdp_root_modules: List[nn.Module] = []
75
+ visited_fsdp_states: Set[_FSDPState] = set()
76
+ # NOTE: This function assumes that `module.modules()` proceeds top-down.
77
+ for submodule in module.modules():
78
+ optional_state = _get_module_fsdp_state(submodule)
79
+ if (
80
+ optional_state is not None
81
+ and optional_state not in visited_fsdp_states
82
+ and _is_fsdp_root(optional_state, submodule)
83
+ ):
84
+ visited_fsdp_states.add(optional_state)
85
+ fsdp_root_states.append(optional_state)
86
+ fsdp_root_modules.append(submodule)
87
+ return fsdp_root_states, fsdp_root_modules
88
+
89
+
90
+ def _get_fsdp_root_states(module: nn.Module) -> List[_FSDPState]:
91
+ """See :func:`_get_fsdp_root_states_with_modules`."""
92
+ fsdp_root_states, _ = _get_fsdp_root_states_with_modules(module)
93
+ return fsdp_root_states
94
+
95
+
96
+ def _is_fsdp_root(state: _FSDPState, module: nn.Module) -> bool:
97
+ """
98
+ Returns if ``state`` corresponds to that of an FSDP root.
99
+
100
+ For the wrapper code path, ``state`` and ``module`` should be the same. For
101
+ the non-wrapper code path, ``state`` should be ``module`` 's state.
102
+ """
103
+ # Force a lazy initialization to determine the FSDP root
104
+ _lazy_init(state, module)
105
+ assert state._is_root is not None # mypy
106
+ return state._is_root
107
+
108
+
109
+ @no_type_check
110
+ def _lazy_init(
111
+ state: _FSDPState,
112
+ root_module: nn.Module,
113
+ ) -> _FSDPState:
114
+ """
115
+ Performs initialization lazily, typically right before the first forward
116
+ pass. The laziness is needed to ensure that the parameter device/dtype and
117
+ the FSDP hierarchy have finalized. This method's actual logic only runs on
118
+ the root FSDP instance, which performs initialization for all non-root FSDP
119
+ instances to avoid partial initialization.
120
+
121
+ For the non-composable code path, ``state`` and ``root_module`` should be
122
+ the same, namely the FSDP instance itself.
123
+ """
124
+ if state._is_root is not None:
125
+ return # no-op: already lazily initialized
126
+ if not state._device_handle.is_available():
127
+ # Allow the FSDP constructor to run even without CUDA but check this
128
+ # once we start real execution
129
+ raise RuntimeError("FSDP does not support CPU only execution")
130
+ # The following logic is only run on the root FSDP instance since it will
131
+ # set `_is_root=False` for the non-root instances
132
+ state._is_root = True
133
+ _assert_in_training_states(state, [TrainingState.IDLE])
134
+ _check_flat_params_on_expected_device(state, root_module)
135
+ state._all_fsdp_states = traversal_utils._get_fsdp_states(root_module)
136
+ _init_streams(state)
137
+ buffers, buffer_dtypes = _get_buffers_and_dtypes_for_computation(state, root_module)
138
+ _cast_buffers_to_dtype_and_device(buffers, buffer_dtypes, state.compute_device)
139
+ state._exec_order_data.init(state, root_module, state.process_group)
140
+ _share_state_and_init_handle_attrs(state, root_module)
141
+ return state
142
+
143
+
144
+ def _check_flat_params_on_expected_device(state: _FSDPState, module: nn.Module):
145
+ """
146
+ Checks that all ``FlatParameter``s in ``module`` 's tree managed by
147
+ ``state`` are on the expected device for *lazy initialization*.
148
+ """
149
+ cpu_device = torch.device("cpu")
150
+ for handle in traversal_utils._get_fsdp_handles(module):
151
+ if (
152
+ not handle._offload_params
153
+ and handle.flat_param.device != state.compute_device
154
+ ):
155
+ raise RuntimeError(
156
+ "An FSDP-managed module unexpectedly has parameters on "
157
+ f"{handle.flat_param.device}. Make sure to move the module to "
158
+ f"{state.compute_device} before training."
159
+ )
160
+ elif handle._offload_params and handle.flat_param.device != cpu_device:
161
+ raise RuntimeError(
162
+ "An FSDP-managed module with parameter CPU offloading enabled "
163
+ f"has parameters on {handle.flat_param.device}. Make sure to "
164
+ f"not move the module from CPU when offloading parameters."
165
+ )
166
+
167
+
168
+ @no_type_check
169
+ def _share_state_and_init_handle_attrs(
170
+ root_state: _FSDPState,
171
+ root_module: nn.Module,
172
+ ) -> None:
173
+ """
174
+ Shares data structure state from the ``root_state`` to all FSDP states in
175
+ ``root_module`` 's module tree, and initializes handle attributes. These
176
+ are done together to require a single loop over the states.
177
+ """
178
+ handle = root_state._handle
179
+ if handle:
180
+ handle.init_flat_param_attributes()
181
+ attr_name_to_values: Dict[str, Set[Any]] = {}
182
+ for attr_name in HOMOGENEOUS_ATTR_NAMES:
183
+ attr_name_to_values[attr_name] = set()
184
+ root_state._all_handles = root_state._exec_order_data.all_handles # share reference
185
+ # Update _has_optim_in_backward for each handle.
186
+ for handle in root_state._all_handles:
187
+ flat_param = handle.flat_param
188
+ if hasattr(flat_param, "_in_backward_optimizers"):
189
+ raise RuntimeError(
190
+ "FSDP optimizer in backward only supported with use_orig_params=True!"
191
+ )
192
+ handle._has_optim_in_backward = flat_param._params is not None and any(
193
+ hasattr(param, "_in_backward_optimizers") for param in flat_param._params
194
+ )
195
+ if handle._has_optim_in_backward:
196
+ torch._C._log_api_usage_once("fsdp.optimizer_in_backward")
197
+ for fsdp_state in root_state._all_fsdp_states:
198
+ for attr_name in HOMOGENEOUS_ATTR_NAMES:
199
+ _p_assert(
200
+ hasattr(fsdp_state, attr_name),
201
+ f"FSDP state missing attribute {attr_name}",
202
+ )
203
+ attr_name_to_values[attr_name].add(getattr(fsdp_state, attr_name))
204
+ if fsdp_state is root_state:
205
+ continue
206
+ # Relax the assert for non-root FSDP instances in case the nested
207
+ # initialized module is wrapped again in FSDP later (e.g. after
208
+ # training to run inference)
209
+ _p_assert(
210
+ fsdp_state._is_root is None or not fsdp_state._is_root,
211
+ "Non-root FSDP instance's `_is_root` should not have been "
212
+ "set yet or should have been set to `False`",
213
+ )
214
+ fsdp_state._is_root = False
215
+ fsdp_state._unshard_stream = root_state._unshard_stream
216
+ fsdp_state._post_backward_stream = root_state._post_backward_stream
217
+ fsdp_state._pre_unshard_stream = root_state._pre_unshard_stream
218
+ fsdp_state._all_reduce_stream = root_state._all_reduce_stream
219
+ fsdp_state._default_stream = root_state._default_stream
220
+ fsdp_state._exec_order_data = root_state._exec_order_data
221
+ fsdp_state._free_event_queue = root_state._free_event_queue
222
+ if fsdp_state._fsdp_extension is not None:
223
+ fsdp_state._fsdp_extension.compute_stream = root_state._default_stream
224
+ handle = fsdp_state._handle
225
+ if handle:
226
+ handle.init_flat_param_attributes()
227
+ for attr_name, attr_values in attr_name_to_values.items():
228
+ if len(attr_values) != 1:
229
+ raise ValueError(
230
+ f"Expects one homogeneous value for {attr_name} but got {attr_values}"
231
+ )
232
+
233
+
234
+ @no_type_check
235
+ def _init_streams(
236
+ state: _FSDPState,
237
+ ) -> None:
238
+ """
239
+ Initializes CUDA streams for overlapping communication, computation, and
240
+ data transfers. The streams should be shared across FSDP instances.
241
+ """
242
+ assert state._is_root
243
+ assert state._device_handle.is_available()
244
+ uses_hybrid_sharding = any(
245
+ fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES
246
+ for fsdp_state in state._all_fsdp_states
247
+ )
248
+ # Prioritize all-gathers/reduce-scatters over async all-reduce for HSDP and
249
+ # preserve the default priority of 0 otherwise
250
+ high_priority = -1 if state.limit_all_gathers and uses_hybrid_sharding else 0
251
+ # Default stream for computation
252
+ state._default_stream = state._device_handle.current_stream()
253
+ if state._fsdp_extension is not None:
254
+ # set the compute stream to the FSDP extension
255
+ state._fsdp_extension.compute_stream = state._default_stream
256
+
257
+ # Stream for unshard logic, including allocating the all-gather destination
258
+ # tensors and the all-gathers themselves
259
+ state._unshard_stream = state._device_handle.Stream(priority=high_priority)
260
+ # Stream for overlapping gradient reduction with the backward pass gradient
261
+ # computation
262
+ state._post_backward_stream = state._device_handle.Stream(priority=high_priority)
263
+ # Stream for pre-unshard logic, namely allocations and writes for CPU
264
+ # offloading (H2D copy) and mixed precision (low precision cast)
265
+ state._pre_unshard_stream = state._device_handle.Stream(priority=high_priority)
266
+ # Stream to run HSDP's all-reduce as async (if using HSDP)
267
+ state._all_reduce_stream = (
268
+ state._device_handle.Stream() if uses_hybrid_sharding else state._default_stream
269
+ )
270
+
271
+
272
+ @no_type_check
273
+ def _unshard(
274
+ state: _FSDPState,
275
+ handle: FlatParamHandle,
276
+ unshard_stream: torch.Stream,
277
+ pre_unshard_stream: torch.Stream,
278
+ ) -> None:
279
+ """
280
+ Unshards the handles in ``handles``. If the handles are in
281
+ :meth:`summon_full_params` and are using mixed precision, then they are
282
+ forced to full precision.
283
+
284
+ Postcondition: handle's ``FlatParameter`` 's data is the padded
285
+ unsharded flat parameter on the compute device.
286
+ """
287
+ if not handle:
288
+ return
289
+ with state._device_handle.stream(pre_unshard_stream):
290
+ ran_pre_unshard = handle.pre_unshard()
291
+ if ran_pre_unshard:
292
+ unshard_stream.wait_stream(pre_unshard_stream)
293
+ if state.limit_all_gathers:
294
+ event = state._free_event_queue.dequeue_if_needed()
295
+ if event:
296
+ with torch.profiler.record_function(
297
+ "FullyShardedDataParallel.rate_limiter"
298
+ ):
299
+ event.synchronize()
300
+ with state._device_handle.stream(unshard_stream):
301
+ handle.unshard()
302
+ handle.post_unshard()
303
+
304
+
305
+ @no_type_check
306
+ def _reshard(
307
+ state: _FSDPState,
308
+ handle: FlatParamHandle,
309
+ free_unsharded_flat_param: bool,
310
+ ):
311
+ """
312
+ Reshards the handle. ``free_unsharded_flat_param`` indicates whether to
313
+ free the handle's padded unsharded flat parameter.
314
+ """
315
+ handle.reshard(free_unsharded_flat_param)
316
+ if state.limit_all_gathers and free_unsharded_flat_param:
317
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
318
+ # We don't run a even queue for freeing under torch compile atm
319
+ # But maybe we need to? TODO(voz): Look into this
320
+ free_event = state._device_handle.Event()
321
+ free_event.record()
322
+ state._free_event_queue.enqueue(free_event)
323
+ handle.post_reshard()
324
+ # Flat parameter freed or not, we always have to "unshard" the parameter
325
+ # upon next access to get its shape correct.
326
+ handle._prefetched = False
327
+
328
+
329
+ def _unshard_grads(
330
+ handle: Optional[FlatParamHandle],
331
+ ) -> None:
332
+ if handle:
333
+ handle.unshard_grad()
334
+
335
+
336
+ def _reshard_grads(
337
+ handle: Optional[FlatParamHandle],
338
+ ) -> None:
339
+ if handle:
340
+ handle.reshard_grad()
341
+
342
+
343
+ @no_type_check
344
+ def _pre_forward(
345
+ state: _FSDPState,
346
+ handle: Optional[FlatParamHandle],
347
+ unshard_fn: Callable,
348
+ module: nn.Module,
349
+ args: Tuple[Any, ...],
350
+ kwargs: Dict[str, Any],
351
+ ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
352
+ """
353
+ Runs the pre-forward logic. This includes an opportunity to unshard
354
+ currently sharded parameters such as those for the current forward and
355
+ registering post-backward hooks for these current parameters. This function
356
+ also converts forward ``args`` and ``kwargs`` to the given precision.
357
+
358
+ Args:
359
+ handles (List[FlatParamHandle]): Handles giving the parameters used in
360
+ the current forward.
361
+ unshard_fn (Optional[Callable]): A callable to unshard any currently
362
+ sharded parameters or ``None`` to not do any unsharding.
363
+ module (nn.Module): Module whose forward this method runs right before;
364
+ expected by the hook signature.
365
+ args (Tuple[Any, ...]): Module forward ``args``.
366
+ kwargs (Dict[str, Any]): Module forward ``kwargs``.
367
+ """
368
+ with torch.profiler.record_function("FullyShardedDataParallel._pre_forward"):
369
+ # For `fully_shard` + `checkpoint`, skip pre-forward logic in the
370
+ # recomputed forward
371
+ if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
372
+ # For both checkpoint implementations, we do not need to re-cast
373
+ # inputs here since they will be checkpointed in the low precision
374
+ # either by AC or normally by autograd as long as the AC region is
375
+ # nested within FSDP
376
+ return args, kwargs
377
+ state.training_state = TrainingState.FORWARD_BACKWARD
378
+ state._exec_order_data.record_pre_forward(handle, module.training)
379
+ if handle:
380
+ handle._training_state = HandleTrainingState.FORWARD
381
+ if unshard_fn is not None:
382
+ unshard_fn(state, handle)
383
+ # Register post-backward hooks to reshard the parameters and reduce-scatter
384
+ # their gradients. They must be re-registered every forward pass in case
385
+ # the `grad_fn` is mutated.
386
+ _register_post_backward_hook(state, handle)
387
+ # We have to reallocate the _cpu_grad if optimizer overlap
388
+ # set the grad to None in the backward pass.
389
+ if handle and handle._offload_params and handle.flat_param._cpu_grad is None:
390
+ handle.flat_param._cpu_grad = torch.zeros_like(
391
+ handle.flat_param._local_shard, device=torch.device("cpu")
392
+ ).pin_memory(device=state.compute_device)
393
+
394
+ should_cast_forward_inputs = (
395
+ state._handle and not state._handle._force_full_precision
396
+ )
397
+
398
+ if should_cast_forward_inputs and state.mixed_precision.cast_forward_inputs:
399
+ # Recursively convert args and kwargs to specified precision.
400
+ input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype
401
+ args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)
402
+ _register_post_backward_reshard_only_hook(state, handle, args, kwargs)
403
+ return args, kwargs
404
+
405
+
406
+ @no_type_check
407
+ def _pre_forward_unshard(
408
+ state: _FSDPState,
409
+ handle: Optional[FlatParamHandle],
410
+ ) -> None:
411
+ """Unshards parameters in the pre-forward."""
412
+ if not handle:
413
+ return
414
+ # If the handles have been prefetched, then there is no need to call
415
+ # `_unshard()` again
416
+ if not handle._prefetched:
417
+ _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)
418
+ handle._needs_pre_forward_unshard = False
419
+ # Don't wait during trace
420
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
421
+ current_stream = state._device_handle.current_stream()
422
+ if state._unshard_event is not None:
423
+ current_stream.wait_event(state._unshard_event)
424
+ state._unshard_event = None
425
+ else:
426
+ current_stream.wait_stream(state._unshard_stream)
427
+ with torch.profiler.record_function(
428
+ "FullyShardedDataParallel._pre_forward_prefetch"
429
+ ):
430
+ _prefetch_handle(state, handle, _PrefetchMode.FORWARD)
431
+
432
+
433
+ @no_type_check
434
+ def _post_forward(
435
+ state: _FSDPState,
436
+ handle: Optional[FlatParamHandle],
437
+ reshard_fn: Callable,
438
+ module: nn.Module,
439
+ input: Any,
440
+ output: Any,
441
+ ) -> Any:
442
+ """
443
+ Runs the post-forward logic. This includes an opportunity to reshard
444
+ currently unsharded parameters such as those used in the current forward
445
+ and registering pre-backward hooks on the forward outputs.
446
+
447
+ Args:
448
+ handles (List[FlatParamHandle]): Handles giving the parameters used in
449
+ the current forward.
450
+ reshard_fn (Optional[Callable]): A callable to reshard any currently
451
+ unsharded parameters (e.g. from the current forward) or ``None`` to
452
+ not do any resharding.
453
+ module (nn.Module): Module whose forward just ran, which should be a
454
+ fully sharded module (see [Note: Fully Sharded Module]); expected
455
+ by the hook signature.
456
+ input (Any): Unused; expected by the hook signature.
457
+ output (Any): Forward pass output; pre-backward hooks are registered on
458
+ the tensors that require gradients in this output.
459
+
460
+ Postcondition: Each ``FlatParameter`` 's data points to the sharded flat
461
+ parameter.
462
+ """
463
+ with torch.profiler.record_function("FullyShardedDataParallel._post_forward"):
464
+ # For `fully_shard` + `checkpoint`, skip post-forward logic in the
465
+ # recomputed forward
466
+ if handle and handle._training_state == HandleTrainingState.BACKWARD_PRE:
467
+ return output
468
+
469
+ state._exec_order_data.record_post_forward(handle)
470
+ if reshard_fn is not None:
471
+ reshard_fn(state, handle)
472
+ # Register pre-backward hooks to unshard the flat parameters for the
473
+ # gradient computation (if needed)
474
+ output = _register_pre_backward_hooks(state, module, output, handle)
475
+ state.training_state = TrainingState.IDLE
476
+ if handle:
477
+ handle._training_state = HandleTrainingState.IDLE
478
+ return output
479
+
480
+
481
+ @no_type_check
482
+ def _post_forward_reshard(
483
+ state: _FSDPState,
484
+ handle: FlatParamHandle,
485
+ ) -> None:
486
+ """Reshards parameters in the post-forward."""
487
+ if not handle:
488
+ return
489
+ # Do not free the root's parameters in the post-forward for `FULL_SHARD`
490
+ # with the intention that they are immediately used for backward
491
+ # computation (though this may not be true)
492
+ free_unsharded_flat_param = (
493
+ not state._is_root
494
+ and handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES
495
+ )
496
+ _reshard(state, handle, free_unsharded_flat_param)
497
+
498
+
499
+ @no_type_check
500
+ def _root_pre_forward(
501
+ state: _FSDPState,
502
+ module: nn.Module,
503
+ args,
504
+ kwargs,
505
+ ) -> None:
506
+ """
507
+ Runs pre-forward logic specific to the root FSDP instance, which should run
508
+ before any individual module's pre-forward. This starts with an attempt at
509
+ lazy initialization (which only runs non-vacuously once). Otherwise, if
510
+ this is called on a non-root FSDP instance, then it returns directly.
511
+
512
+ Args:
513
+ module (nn.Module): Module for which this logic tries to run. It may or
514
+ may not be the root. If not, then this method does not do anything.
515
+ """
516
+ with torch.profiler.record_function("FullyShardedDataParallel._root_pre_forward"):
517
+ _lazy_init(state, module)
518
+ _p_assert(state._is_root is not None, "Expects a root FSDP to have been set")
519
+ if not state._is_root:
520
+ # Always cast forward inputs in the root of this local FSDP unit for mixed
521
+ # precision, as this is where mixed precision could be configed.
522
+ # This is more useful for auto wrapping that is recommended in composable path.
523
+ # For manual wrapping, cast forward inputs on each local FSDP unit root will
524
+ # increase some overhead, so not turned on for model wrapper path right now where
525
+ # manual wrapping is more broadly used.
526
+ if _is_composable(state):
527
+ return _root_cast_forward_input(state, module, args, kwargs)
528
+ return args, kwargs
529
+
530
+ # We cast buffers back to full precision if we're forcing full precision. Disjointly, we check if buffers
531
+ # are in full precision and if we should cast them back to lower precision, which happens when
532
+ # exiting eval() mode.
533
+ handle = state._handle
534
+ if handle:
535
+ should_cast_buffers_to_full_prec = handle._force_full_precision
536
+ else:
537
+ should_cast_buffers_to_full_prec = True
538
+
539
+ if should_cast_buffers_to_full_prec:
540
+ _cast_buffers_to_dtype_and_device(
541
+ buffers=dict(module.named_buffers()).values(),
542
+ buffer_dtypes=list(state._buffer_name_to_orig_dtype.values()),
543
+ device=state.compute_device,
544
+ )
545
+ # This flag is only set when we cast buffers to full precision, to avoid the
546
+ # CPU overhead that can stem from retrieving all buffers and their types in the
547
+ # following else branch.
548
+ state._needs_buffer_dtype_restore_check = True
549
+ elif getattr(state, "_needs_buffer_dtype_restore_check", False):
550
+ # Check if buffers are in full precision and we need to cast them
551
+ # back down.
552
+ (
553
+ buffers,
554
+ buffer_dtypes_for_computation,
555
+ ) = _get_buffers_and_dtypes_for_computation(state, module)
556
+ if len(buffers) > 0 and len(buffer_dtypes_for_computation) > 0:
557
+ if any(
558
+ buffer.dtype != buffer_dtype_for_computation
559
+ for buffer, buffer_dtype_for_computation in zip(
560
+ buffers, buffer_dtypes_for_computation
561
+ )
562
+ ):
563
+ # Assume we have to cast everything if there is one mismatch
564
+ _cast_buffers_to_dtype_and_device(
565
+ buffers, buffer_dtypes_for_computation, state.compute_device
566
+ )
567
+ # We don't have to check this again until we cast buffers to full precision again.
568
+ state._needs_buffer_dtype_restore_check = False
569
+
570
+ if state.forward_prefetch:
571
+ handles = []
572
+ for fsdp_state in state._all_fsdp_states:
573
+ if fsdp_state._handle:
574
+ handles.append(fsdp_state._handle)
575
+ for handle in handles:
576
+ handle._needs_pre_forward_unshard = True
577
+ handle._prefetched = False
578
+ _wait_for_computation_stream(
579
+ state._device_handle.current_stream(),
580
+ state._unshard_stream,
581
+ state._pre_unshard_stream,
582
+ )
583
+ _reset_flat_param_grad_info_if_needed(state._all_handles)
584
+
585
+ # Prepares the forward inputs by moving them to ``compute_device``
586
+ # TODO: Do not use the side stream for tensor copies for now; investigate
587
+ # the perf with/without it.
588
+ with torch.profiler.record_function("FullyShardedDataParallel._to_kwargs"):
589
+ args_tuple, kwargs_tuple = _to_kwargs(
590
+ args, kwargs, state.compute_device, False
591
+ )
592
+ args = args_tuple[0]
593
+ kwargs = kwargs_tuple[0]
594
+
595
+ return _root_cast_forward_input(state, module, args, kwargs)
596
+
597
+
598
+ @no_type_check
599
+ def _root_cast_forward_input(
600
+ state: _FSDPState, module: torch.nn.Module, args, kwargs
601
+ ) -> Tuple[Any, Any]:
602
+ if state._handle:
603
+ force_full_precision = not state._handle._force_full_precision
604
+ else:
605
+ force_full_precision = True
606
+
607
+ should_cast_forward_inputs = (
608
+ (module.training or not state._use_full_prec_in_eval) and force_full_precision
609
+ ) and state.mixed_precision.cast_root_forward_inputs
610
+
611
+ if should_cast_forward_inputs:
612
+ input_dtype: Optional[torch.dtype] = state.mixed_precision.param_dtype
613
+ args, kwargs = _cast_forward_inputs(input_dtype, *args, **kwargs)
614
+
615
+ return args, kwargs
616
+
617
+
618
+ @no_type_check
619
+ def _pre_backward_hook(
620
+ state: _FSDPState,
621
+ module: nn.Module,
622
+ handle: FlatParamHandle,
623
+ grad,
624
+ *unused: Any,
625
+ ) -> Any:
626
+ """
627
+ Prepares ``_handle`` 's ``FlatParameter`` s for gradient computation.
628
+
629
+ Args:
630
+ module (nn.Module): Fully sharded module (see [Note: Fully Sharded
631
+ Module]).
632
+ """
633
+ # Only run the pre-backward hook once per group of handles involved in the
634
+ # same module forward computation
635
+ if (
636
+ handle
637
+ and hasattr(handle, "_ran_pre_backward_hook")
638
+ and handle._ran_pre_backward_hook
639
+ ):
640
+ return grad
641
+
642
+ with torch.profiler.record_function("FullyShardedDataParallel._pre_backward_hook"):
643
+ # Queue the post-backward callback once for the root FSDP instance to
644
+ # attach it to the outermost backward graph task so that it is called
645
+ # after all backward calls complete
646
+ if state._is_root and not state._post_backward_callback_queued:
647
+ _register_post_backward_final_callback(state, module)
648
+ _reset_flat_param_grad_info_if_needed(state._all_handles)
649
+ elif handle:
650
+ allowed_states = [TrainingState.IDLE]
651
+ if _is_composable(state):
652
+ allowed_states.append(TrainingState.FORWARD_BACKWARD)
653
+ _assert_in_training_states(state, allowed_states)
654
+ state.training_state = TrainingState.FORWARD_BACKWARD
655
+ # Queueing the post-backward callback is the only logic that is not
656
+ # per-handle in the pre-backward hook, so we can return early here if
657
+ # there are no handles.
658
+ if not handle:
659
+ return grad
660
+ handle._training_state = HandleTrainingState.BACKWARD_PRE
661
+
662
+ if handle._needs_pre_backward_unshard:
663
+ # If the handles have been prefetched, then there is no need to
664
+ # call `_unshard()` again
665
+ if not handle._prefetched:
666
+ _unshard(
667
+ state,
668
+ handle,
669
+ state._unshard_stream,
670
+ state._pre_unshard_stream,
671
+ )
672
+ # Don't wait during trace
673
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
674
+ state._device_handle.current_stream().wait_stream(state._unshard_stream)
675
+
676
+ # Set this to `False` to ensure that a mistargeted prefetch does not
677
+ # actually unshard these handles
678
+ handle._needs_pre_backward_unshard = False
679
+ with torch.profiler.record_function(
680
+ "FullyShardedDataParallel._pre_backward_prefetch"
681
+ ):
682
+ _prefetch_handle(state, handle, _PrefetchMode.BACKWARD)
683
+ handle.prepare_gradient_for_backward()
684
+ handle._ran_pre_backward_hook = True
685
+ return grad
686
+
687
+
688
+ @no_type_check
689
+ @torch.no_grad()
690
+ def _post_backward_hook(
691
+ state: _FSDPState,
692
+ handle: FlatParamHandle,
693
+ flat_param,
694
+ *unused: Any,
695
+ ):
696
+ """
697
+ Reduce-scatters the gradient of ``handle`` 's ``FlatParameter``.
698
+
699
+ Precondition: The ``FlatParameter`` 's ``.grad`` attribute contains the
700
+ unsharded gradient for the local batch.
701
+
702
+ Postcondition:
703
+ - If using ``NO_SHARD``, then the ``.grad`` attribute is the reduced
704
+ unsharded gradient.
705
+ - Otherwise, the ``_saved_grad_shard`` attribute is the reduced sharded
706
+ gradient (accumulating with any existing gradient).
707
+ """
708
+ _log_post_backward_hook(state, handle, logger)
709
+ flat_param = handle.flat_param
710
+ flat_param._post_backward_called = True
711
+ with torch.autograd.profiler.record_function(
712
+ "FullyShardedDataParallel._post_backward_hook"
713
+ ):
714
+ _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])
715
+ # For multiple applications of reentrant AC across submodules sharing
716
+ # the same `FlatParameter`, the post-backward hook may run multiple
717
+ # times in one backward, in which case we permit the state to already
718
+ # be in `BACKWARD_POST`.
719
+ _p_assert(
720
+ handle._training_state
721
+ in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.BACKWARD_POST),
722
+ f"Expects `BACKWARD_PRE` or `BACKWARD_POST` state but got {handle._training_state}",
723
+ )
724
+ handle._training_state = HandleTrainingState.BACKWARD_POST
725
+
726
+ if flat_param.grad is None:
727
+ return
728
+ if flat_param.grad.requires_grad:
729
+ raise RuntimeError("FSDP does not support gradients of gradients")
730
+
731
+ _post_backward_reshard(state, handle)
732
+ if not state._sync_gradients:
733
+ if handle._use_orig_params:
734
+ handle._use_unsharded_grad_views()
735
+ return
736
+
737
+ # Wait for all ops in the current stream (e.g. gradient computation) to
738
+ # finish before reduce-scattering the gradient
739
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
740
+ state._post_backward_stream.wait_stream(
741
+ state._device_handle.current_stream()
742
+ )
743
+
744
+ with state._device_handle.stream(state._post_backward_stream):
745
+ autograd_computed_grad = flat_param.grad.data
746
+ if (
747
+ not _low_precision_hook_enabled(state)
748
+ and flat_param.grad.dtype != handle._reduce_dtype
749
+ # If we are forcing full precision but communicating grads
750
+ # (i.e. model.eval() + full precision in eval was configured), don't downcast gradient.
751
+ and not handle._force_full_precision
752
+ ):
753
+ flat_param.grad.data = flat_param.grad.to(handle._reduce_dtype)
754
+ if handle.uses_sharded_strategy:
755
+ _reduce_grad(state, handle)
756
+ else:
757
+ _reduce_grad_no_shard(state, handle)
758
+ # Since the unsharded gradient is produced in the computation
759
+ # stream and consumed in the post-backward stream, inform the
760
+ # caching allocator (before it goes out of scope)
761
+ _no_dispatch_record_stream(
762
+ autograd_computed_grad, state._post_backward_stream
763
+ )
764
+
765
+
766
+ def _post_backward_reshard_only_hook(
767
+ state: _FSDPState,
768
+ handle: FlatParamHandle,
769
+ *unused: Any,
770
+ ) -> None:
771
+ with torch.profiler.record_function(
772
+ "FullyShardedDataParallel._post_backward_hook_reshard_only"
773
+ ):
774
+ # `_pre_backward_hook` may not get executed
775
+ # if forward output does not require grad
776
+ # overwrite IDLE state for post-backward prefetching
777
+ state.training_state = TrainingState.FORWARD_BACKWARD
778
+ handle._training_state = HandleTrainingState.BACKWARD_POST
779
+ _post_backward_reshard(state, handle)
780
+
781
+
782
+ def _post_backward_reshard(
783
+ state: _FSDPState,
784
+ handle: FlatParamHandle,
785
+ *unused: Any,
786
+ ) -> None:
787
+ free_unsharded_flat_param = _should_free_in_backward(state, handle)
788
+ _reshard(state, handle, free_unsharded_flat_param)
789
+
790
+ # TODO: Post-backward prefetching does not support the multiple handles
791
+ # per module case since the post-backward hook runs per handle, not per
792
+ # group of handles.
793
+ with torch.profiler.record_function(
794
+ "FullyShardedDataParallel._post_backward_prefetch"
795
+ ):
796
+ _prefetch_handle(state, handle, _PrefetchMode.BACKWARD)
797
+
798
+
799
+ @no_type_check
800
+ def _should_free_in_backward(
801
+ state: _FSDPState,
802
+ handle: FlatParamHandle,
803
+ ) -> bool:
804
+ """
805
+ Returns whether FSDP should free the unsharded flat parameter in the
806
+ post-backward or not.
807
+ """
808
+ if not handle.uses_sharded_strategy:
809
+ return False
810
+ # If not syncing gradients, then we do not free for strategies that do not
811
+ # reshard after forward as a *heuristic* to tradeoff higher memory for
812
+ # higher throughput.
813
+ return (
814
+ state._sync_gradients
815
+ or handle._sharding_strategy in RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES
816
+ )
817
+
818
+
819
+ @no_type_check
820
+ def _reduce_grad(state: _FSDPState, handle: FlatParamHandle) -> None:
821
+ """
822
+ For sharded strategies, this runs gradient reduction, sharded gradient
823
+ accumulation if needed, and the post-reduction callback.
824
+ """
825
+ flat_param = handle.flat_param
826
+ uses_hybrid_sharded_strategy = handle._sharding_strategy in (
827
+ HandleShardingStrategy.HYBRID_SHARD,
828
+ HandleShardingStrategy._HYBRID_SHARD_ZERO2,
829
+ )
830
+ # We clear `.grad` to permit multiple backwards. This avoids a race where
831
+ # the second backward pass computation precedes ahead of the first backward
832
+ # pass reduction, which is possible since the reduction is issued in a
833
+ # separate stream and is async and would result in reducing the wrong
834
+ # gradient.
835
+ unsharded_grad = flat_param.grad.data
836
+ flat_param.grad = None
837
+ padded_unsharded_grad, new_sharded_grad = _get_reduce_scatter_tensors(
838
+ state, unsharded_grad
839
+ )
840
+ if state._comm_hook is None: # default path
841
+ _div_if_needed(padded_unsharded_grad, state._gradient_predivide_factor)
842
+ pg = (
843
+ handle._fake_process_group
844
+ if handle._use_fake_reduce
845
+ else state.process_group
846
+ )
847
+ dist.reduce_scatter_tensor(
848
+ new_sharded_grad,
849
+ padded_unsharded_grad,
850
+ group=pg,
851
+ )
852
+ if uses_hybrid_sharded_strategy:
853
+ # Don't wait during trace
854
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
855
+ state._all_reduce_stream.wait_stream(state._post_backward_stream)
856
+ with state._device_handle.stream(state._all_reduce_stream):
857
+ # Since the new sharded gradient is produced in the post-
858
+ # backward stream and consumed in the all-reduce stream,
859
+ # inform the caching allocator
860
+ _no_dispatch_record_stream(new_sharded_grad, state._all_reduce_stream)
861
+ dist.all_reduce(new_sharded_grad, group=state._inter_node_pg)
862
+ _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor)
863
+ grad_to_offload = _accumulate_sharded_grad(
864
+ state, handle, new_sharded_grad
865
+ )
866
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
867
+ return
868
+ _div_if_needed(new_sharded_grad, state._gradient_postdivide_factor)
869
+ else:
870
+ state._comm_hook(
871
+ state._comm_hook_state, padded_unsharded_grad, new_sharded_grad
872
+ )
873
+ # NOTE: HSDP variants do not support communication hook.
874
+ grad_to_offload = _accumulate_sharded_grad(state, handle, new_sharded_grad)
875
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
876
+
877
+
878
+ @no_type_check
879
+ def _get_reduce_scatter_tensors(
880
+ state: _FSDPState, unsharded_grad: torch.Tensor
881
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
882
+ """
883
+ Returns the input and output tensors to reduce-scatter, respectively.
884
+ """
885
+ chunks = list(unsharded_grad.chunk(state.world_size))
886
+ numel_to_pad = state.world_size * chunks[0].numel() - unsharded_grad.numel()
887
+ padded_unsharded_grad = (
888
+ F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad
889
+ )
890
+ new_sharded_grad = torch.empty_like(chunks[0]) # padded
891
+ return padded_unsharded_grad, new_sharded_grad
892
+
893
+
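The padding arithmetic above can be checked in isolation. The following is a minimal, hedged sketch (not part of this file) that reproduces the sizing logic of `_get_reduce_scatter_tensors` on CPU without a process group; `world_size` is a stand-in value and the reduce-scatter itself is omitted.

import torch
import torch.nn.functional as F

world_size = 4                       # stand-in for state.world_size
unsharded_grad = torch.randn(10)     # 10 elements do not split evenly across 4 ranks

chunks = list(unsharded_grad.chunk(world_size))
numel_to_pad = world_size * chunks[0].numel() - unsharded_grad.numel()   # 4*3 - 10 = 2
padded_unsharded_grad = (
    F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad
)
new_sharded_grad = torch.empty_like(chunks[0])   # per-rank reduce-scatter output buffer

assert padded_unsharded_grad.numel() == world_size * new_sharded_grad.numel()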
894
+ @no_type_check
895
+ def _accumulate_sharded_grad(
896
+ state: _FSDPState,
897
+ handle: FlatParamHandle,
898
+ sharded_grad: torch.Tensor,
899
+ ) -> torch.Tensor:
900
+ """
901
+ Accumulates the reduce-scattered sharded gradient with any existing sharded
902
+ gradient if needed, returning the gradient to offload (if CPU offloading is
903
+ enabled).
904
+ """
905
+ flat_param = handle.flat_param
906
+ _cast_grad_to_param_dtype(state, sharded_grad, flat_param)
907
+ # Save the sharded gradient in `_saved_grad_shard` to support gradient
908
+ # accumulation -- for multiple backwards, the gradient reductions may
909
+ # happen in arbitrary order
910
+ accumulate_grad = hasattr(flat_param, "_saved_grad_shard")
911
+ if accumulate_grad:
912
+ _check_grad_to_accumulate(sharded_grad, flat_param._saved_grad_shard)
913
+ flat_param._saved_grad_shard += sharded_grad
914
+ else:
915
+ flat_param._saved_grad_shard = sharded_grad
916
+ grad_to_offload = flat_param._saved_grad_shard
917
+ return grad_to_offload
918
+
919
+
920
+ @no_type_check
921
+ def _reduce_grad_no_shard(state: _FSDPState, handle: FlatParamHandle) -> None:
922
+ """
923
+ For no-shard, this runs gradient reduction (which directly covers any
924
+ gradient accumulation implicitly) and the post-reduction callback.
925
+ """
926
+ flat_param = handle.flat_param
927
+ if state._comm_hook is None: # default path
928
+ _div_if_needed(flat_param.grad, state._gradient_predivide_factor)
929
+ dist.all_reduce(flat_param.grad, group=state.process_group)
930
+ _div_if_needed(flat_param.grad, state._gradient_postdivide_factor)
931
+ else:
932
+ state._comm_hook(state._comm_hook_state, flat_param.grad)
933
+ # For `NO_SHARD`, we can keep the low precision gradients by simply
934
+ # omitting the cast altogether
935
+ if not handle._keep_low_precision_grads:
936
+ _cast_grad_to_param_dtype(state, flat_param.grad, flat_param)
937
+ grad_to_offload = flat_param.grad.data
938
+ _post_reduce_grad_callback(state, handle, grad_to_offload)
939
+
940
+
941
+ @no_type_check
942
+ def _post_reduce_grad_callback(
943
+ state: _FSDPState,
944
+ handle: FlatParamHandle,
945
+ # Additional arguments needed for the callback logic
946
+ grad_to_offload: torch.Tensor,
947
+ ):
948
+ """
949
+ This callback captures any logic to run after the gradient reduction
950
+ finishes. Currently, this offloads the gradient to CPU if CPU offloading is
951
+ enabled and uses sharded gradient views if ``use_orig_params=True``.
952
+ """
953
+ _offload_grad(state, handle, grad_to_offload)
954
+ _post_backward_use_sharded_grad_views(handle)
955
+
956
+
957
+ @no_type_check
958
+ def _offload_grad(
959
+ state: _FSDPState,
960
+ handle: FlatParamHandle,
961
+ grad_to_offload: torch.Tensor,
962
+ ):
963
+ if not handle._offload_params:
964
+ return
965
+ # Offload the gradient to CPU to ensure parameters and gradients are on the
966
+ # same device as required by the optimizer
967
+ # TODO: Investigate why `NO_SHARD` breaks correctness when using
968
+ # `non_blocking=True` here.
969
+ # TODO (rohan-varma): When CPU offload and optimizer overlap,
970
+ # non_blocking=True won't work since the copy may have not finished before
971
+ # the optimizer step executes on CPU. If we want to use non-blocking=True
972
+ # here, we'll have to synchronize before using result on CPU.
973
+ non_blocking = handle.uses_sharded_strategy and not handle._has_optim_in_backward
974
+ handle.flat_param._cpu_grad.copy_(
975
+ grad_to_offload.detach(), non_blocking=non_blocking
976
+ ) # synchronized in the post-backward callback
977
+ # Since the gradient being offloaded may have been produced in the
978
+ # computation stream and is being consumed here in the post-backward
979
+ # stream, inform the caching allocator
980
+ _no_dispatch_record_stream(grad_to_offload.data, state._post_backward_stream)
981
+
982
+
983
+ @no_type_check
984
+ def _post_backward_use_sharded_grad_views(handle: FlatParamHandle):
985
+ if not handle._use_orig_params:
986
+ return
987
+ # Since the handle's `FlatParameter` completed its gradient computation, we
988
+ # should reset the gradient noneness mask
989
+ handle._reset_is_grad_none()
990
+ # Delay using sharded gradient views until after the reduce-scatter instead
991
+ # of immediately after resharding
992
+ handle._use_sharded_grad_views()
993
+ if handle._has_optim_in_backward:
994
+ handle.prepare_gradient_for_optim()
995
+ for orig_param in handle.flat_param._params:
996
+ # Check for `None` gradient to filter parameters not in the rank
997
+ if orig_param.grad is not None and hasattr(
998
+ orig_param, "_in_backward_optimizers"
999
+ ):
1000
+ # TODO (rohan-varma): For CPU offload, this unfortunately
1001
+ # operates on CPU because the parameters and gradients have
1002
+ # already been offloaded. We should run this on GPU after
1003
+ # refactoring.
1004
+ for optim in orig_param._in_backward_optimizers:
1005
+ optim.step()
1006
+
1007
+ optim.zero_grad(set_to_none=True)
1008
+ handle._reset_flat_param_grad_info_if_needed()
1009
+ if handle._offload_params:
1010
+ handle.flat_param._cpu_grad = None
1011
+
1012
+
1013
+ def _div_if_needed(tensor: torch.Tensor, div_factor: float) -> None:
1014
+ if div_factor > 1:
1015
+ tensor.div_(div_factor)
1016
+
1017
+
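The `_gradient_predivide_factor` / `_gradient_postdivide_factor` pair used with `_div_if_needed` splits the overall division by the world size into two steps so that low-precision gradients stay in range around the collective. The sketch below is illustrative only: the factor-splitting scheme shown is an assumption for demonstration, not necessarily the exact one FSDP uses.

def split_divide_factors(world_size: int):
    """Split world_size into pre- and post-divide factors (illustrative scheme)."""
    pre = 1
    while world_size % pre == 0 and world_size // pre > pre:
        pre *= 2
    return float(pre), float(world_size / pre)

pre, post = split_divide_factors(8)     # (4.0, 2.0); pre * post == world_size
grad = [0.5, 1.5, 2.5]
grad = [g / pre for g in grad]          # pre-divide before the reduction collective
# ... reduce-scatter / all-reduce across ranks would happen here ...
grad = [g / post for g in grad]         # post-divide after the collective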
1018
+ @no_type_check
1019
+ def _cast_grad_to_param_dtype(
1020
+ state: _FSDPState,
1021
+ sharded_grad: torch.Tensor,
1022
+ param: FlatParameter,
1023
+ ):
1024
+ """
1025
+ Casts ``sharded_grad`` back to the full parameter dtype so that the
1026
+ optimizer step runs with that dtype. This performs an actual cast if
1027
+ 1. parameters were in reduced precision during the forward since then
1028
+ gradients would be in that reduced precision, or
1029
+ 2. parameters were not in reduced precision but gradients were in
1030
+ reduced precision for communication.
1031
+ However, if a low precision communication hook is registered, then this
1032
+ dtype cast happens in the hook instead.
1033
+ """
1034
+ _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])
1035
+ if not _low_precision_hook_enabled(state) and sharded_grad.dtype != param.dtype:
1036
+ low_prec_grad_data = sharded_grad.data
1037
+ sharded_grad.data = sharded_grad.data.to(dtype=param.dtype)
1038
+ # Since for `NO_SHARD`, the gradient is produced in the computation
1039
+ # stream and consumed here in the post-backward stream, inform the
1040
+ # caching allocator; for the sharded strategies, the gradient is
1041
+ # produced in the post-backward stream, so this `record_stream()`
1042
+ # should be a no-op
1043
+ _no_dispatch_record_stream(
1044
+ low_prec_grad_data, state._device_handle.current_stream()
1045
+ )
1046
+
1047
+
1048
+ def _check_grad_to_accumulate(
1049
+ new_sharded_grad: torch.Tensor,
1050
+ accumulated_grad: torch.Tensor,
1051
+ ) -> None:
1052
+ _p_assert(
1053
+ accumulated_grad.shape == new_sharded_grad.shape,
1054
+ "Shape mismatch when accumulating gradients: "
1055
+ f"existing gradient shape={accumulated_grad.shape} "
1056
+ f"new gradient shape={new_sharded_grad.shape}",
1057
+ )
1058
+ _p_assert(
1059
+ accumulated_grad.device == new_sharded_grad.device,
1060
+ "Device mismatch when accumulating gradients: "
1061
+ f"existing gradient device={accumulated_grad.device} "
1062
+ f"new gradient device={new_sharded_grad.device}",
1063
+ )
1064
+
1065
+
1066
+ @no_type_check
1067
+ def _low_precision_hook_enabled(state: _FSDPState) -> bool:
1068
+ return state._comm_hook in LOW_PRECISION_HOOKS
1069
+
1070
+
1071
+ @no_type_check
1072
+ @torch.no_grad()
1073
+ def _post_backward_final_callback(
1074
+ state: _FSDPState,
1075
+ module: nn.Module,
1076
+ ):
1077
+ """
1078
+ This waits for the post-backward to finish and performs some final cleanup.
1079
+ This runs at the end of the entire backward pass and should only be called
1080
+ on the root FSDP instance.
1081
+ """
1082
+ _p_assert(
1083
+ state._is_root,
1084
+ "The post-backward callback should only be called on the root FSDP instance",
1085
+ )
1086
+ root_state = state
1087
+
1088
+ if root_state._sync_gradients:
1089
+ current_stream = state._device_handle.current_stream()
1090
+ # TODO (rohan-varma): this also waits for the overlapped optimizer step to finish
1091
+ # since it currently runs in the post-backward stream. That can be
1092
+ # pushed to the next forward if run in a different stream
1093
+ current_stream.wait_stream(root_state._post_backward_stream)
1094
+ if root_state._all_reduce_stream is not current_stream: # uses HSDP
1095
+ current_stream.wait_stream(root_state._all_reduce_stream)
1096
+ if root_state.cpu_offload.offload_params:
1097
+ # Wait for non-blocking GPU -> CPU sharded gradient copies from the
1098
+ # post-backward hooks to finish explicitly since CPU gradients do
1099
+ # not automatically synchronize with the GPU
1100
+ state._device_handle.current_stream().synchronize()
1101
+ root_state._exec_order_data.next_iter()
1102
+
1103
+ for fsdp_state in state._all_fsdp_states:
1104
+ _catch_all_reshard(fsdp_state)
1105
+ _finalize_params(fsdp_state)
1106
+ fsdp_state.training_state = TrainingState.IDLE
1107
+ handle = fsdp_state._handle
1108
+ if handle:
1109
+ handle._ran_pre_backward_hook = False
1110
+ handle._needs_pre_backward_unshard = False
1111
+ handle._post_forward_index = None
1112
+ handle._training_state = HandleTrainingState.IDLE
1113
+ handle._prefetched = False
1114
+ # Reset for cases like one forward and multiple backwards
1115
+ root_state._post_backward_callback_queued = False
1116
+
1117
+
1118
+ @no_type_check
1119
+ def _catch_all_reshard(
1120
+ state: _FSDPState,
1121
+ ) -> None:
1122
+ """
1123
+ Reshards the parameters that may not have been resharded in the
1124
+ post-backward hook. This can happen when a module's output is used in the
1125
+ forward pass, meaning that its pre-backward hook runs (unsharding the
1126
+ parameter), but the post-backward hook does not run because the output was
1127
+ not used in the loss computation corresponding to this backward pass.
1128
+ """
1129
+ # Wrap with a try-except to provide a more informative traceback if an
1130
+ # error is raised
1131
+ try:
1132
+ if state._handle:
1133
+ # TODO: This already-resharded check is brittle:
1134
+ # https://github.com/pytorch/pytorch/issues/83956
1135
+ already_resharded = (
1136
+ state._handle.flat_param.data_ptr()
1137
+ == state._handle.flat_param._local_shard.data_ptr()
1138
+ # If FSDP skipped using sharded views, then the flat parameter
1139
+ # still points to the sharded data, so we need to reshard to
1140
+ # use sharded views
1141
+ and not state._handle._skipped_use_sharded_views
1142
+ )
1143
+ if already_resharded:
1144
+ return
1145
+ free_unsharded_flat_param = _should_free_in_backward(state, state._handle)
1146
+ _reshard(state, state._handle, free_unsharded_flat_param)
1147
+ except Exception as e:
1148
+ _p_assert(
1149
+ False,
1150
+ f"Got exception in the catch-all reshard for {state}: {str(e)}",
1151
+ raise_assertion_error=False,
1152
+ )
1153
+ raise e
1154
+
1155
+
1156
+ @no_type_check
1157
+ def _finalize_params(
1158
+ state: _FSDPState,
1159
+ ) -> None:
1160
+ """Finalizes the parameters before the next iteration."""
1161
+ handle = state._handle
1162
+ if not handle:
1163
+ return
1164
+ flat_param = handle.flat_param
1165
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1166
+ if hasattr(flat_param, "_post_backward_hook_handle"):
1167
+ pbhs_handle = flat_param._post_backward_hook_handle
1168
+ pbhs_handle.remove()
1169
+ del flat_param._post_backward_hook_handle
1170
+ else:
1171
+ if hasattr(flat_param, "_post_backward_hook_state"):
1172
+ post_backward_hook_state_len = len(flat_param._post_backward_hook_state)
1173
+ expected_post_backward_hook_state_len = int(flat_param.requires_grad) + 1
1174
+ _p_assert(
1175
+ post_backward_hook_state_len == expected_post_backward_hook_state_len,
1176
+ f"Invalid: ``_post_backward_hook_state``: {flat_param._post_backward_hook_state}",
1177
+ )
1178
+ flat_param._post_backward_hook_state[-1].remove()
1179
+ delattr(flat_param, "_post_backward_hook_state")
1180
+ if flat_param.requires_grad:
1181
+ if not state._sync_gradients:
1182
+ # Preserve the gradient accumulation state if not synchronizing
1183
+ # gradients: `.grad` remains the unsharded gradient from prior
1184
+ # `no_sync()` iterations, and `_saved_grad_shard` remains the
1185
+ # sharded gradient from the last synchronized iteration
1186
+ return
1187
+ if not handle._has_optim_in_backward:
1188
+ handle.prepare_gradient_for_optim()
1189
+ _p_assert(
1190
+ hasattr(flat_param, "_post_backward_called"),
1191
+ "Expects `_post_backward_called` to be set on the `FlatParameter`",
1192
+ )
1193
+ flat_param._post_backward_called = False
1194
+
1195
+
1196
+ @no_type_check
1197
+ def _prefetch_handle(
1198
+ state: _FSDPState,
1199
+ current_handle: Optional[FlatParamHandle],
1200
+ prefetch_mode: _PrefetchMode,
1201
+ ) -> None:
1202
+ """
1203
+ Prefetches the next handle if needed (without synchronization). If
1204
+ ``current_handle`` is ``None``, then no prefetching happens.
1205
+ """
1206
+ if not current_handle:
1207
+ return
1208
+ handle = _get_handle_to_prefetch(state, current_handle)
1209
+ if not handle:
1210
+ return
1211
+ # Temporarily emulate the training state while calling `_unshard` to
1212
+ # ensure the correct `as_params` for `_use_unsharded_views()`
1213
+ prev_training_state = handle._training_state
1214
+ if prefetch_mode == _PrefetchMode.BACKWARD:
1215
+ handle._training_state = HandleTrainingState.BACKWARD_PRE
1216
+ elif prefetch_mode == _PrefetchMode.FORWARD:
1217
+ handle._training_state = HandleTrainingState.FORWARD
1218
+ else:
1219
+ raise ValueError(f"Invalid prefetch mode on rank {state.rank}: {prefetch_mode}")
1220
+ # Prefetch the next set of handles without synchronizing to allow
1221
+ # the sync to happen as late as possible to maximize overlap
1222
+ _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)
1223
+ handle._training_state = prev_training_state
1224
+ handle._prefetched = True
1225
+
1226
+
1227
+ @no_type_check
1228
+ def _get_handle_to_prefetch(
1229
+ state: _FSDPState,
1230
+ current_handle: FlatParamHandle,
1231
+ ) -> FlatParamHandle:
1232
+ """
1233
+ Returns the handle to prefetch for the next
1234
+ module(s), where ``current_handle`` represents the current module.
1235
+
1236
+ "Prefetching" refers to running the unshard logic early (without
1237
+ synchronization), and the "next" modules depend on the recorded execution
1238
+ order and the current training state.
1239
+ """
1240
+ training_state = _get_training_state(current_handle)
1241
+ valid_training_states = (
1242
+ HandleTrainingState.BACKWARD_PRE,
1243
+ HandleTrainingState.BACKWARD_POST,
1244
+ HandleTrainingState.FORWARD,
1245
+ )
1246
+ _p_assert(
1247
+ training_state in valid_training_states,
1248
+ f"Prefetching is only supported in {valid_training_states} but "
1249
+ f"currently in {training_state}",
1250
+ )
1251
+ eod = state._exec_order_data
1252
+ target_handle: Optional[FlatParamHandle] = None
1253
+ if (
1254
+ training_state == HandleTrainingState.BACKWARD_PRE
1255
+ and state.backward_prefetch == BackwardPrefetch.BACKWARD_PRE
1256
+ ) or (
1257
+ training_state == HandleTrainingState.BACKWARD_POST
1258
+ and state.backward_prefetch == BackwardPrefetch.BACKWARD_POST
1259
+ ):
1260
+ target_handle_candidate = eod.get_handle_to_backward_prefetch(current_handle)
1261
+ if (
1262
+ target_handle_candidate
1263
+ and target_handle_candidate._needs_pre_backward_unshard
1264
+ and not target_handle_candidate._prefetched
1265
+ ):
1266
+ target_handle = target_handle_candidate
1267
+ else:
1268
+ target_handle = None
1269
+ elif training_state == HandleTrainingState.FORWARD and state.forward_prefetch:
1270
+ target_handle_candidate = eod.get_handle_to_forward_prefetch(current_handle)
1271
+ if (
1272
+ target_handle_candidate
1273
+ and target_handle_candidate._needs_pre_forward_unshard
1274
+ and not target_handle_candidate._prefetched
1275
+ ):
1276
+ target_handle = target_handle_candidate
1277
+ else:
1278
+ target_handle = None
1279
+
1280
+ return target_handle
1281
+
1282
+
1283
+ def _get_training_state(
1284
+ handle: FlatParamHandle,
1285
+ ) -> HandleTrainingState:
1286
+ """Returns the training state of ``handle``."""
1287
+ _p_assert(handle, "Expects a non-empty handle")
1288
+ return handle._training_state
1289
+
1290
+
1291
+ @no_type_check
1292
+ def _register_pre_forward_hook(
1293
+ state: _FSDPState,
1294
+ module: nn.Module,
1295
+ ) -> None:
1296
+ """
1297
+ Registers a pre-forward hook on ``module``.
1298
+ """
1299
+ for forward_handle in state._pre_forward_handles:
1300
+ forward_handle.remove()
1301
+ state._pre_forward_handles.clear()
1302
+ module_param_handle = state._fully_sharded_module_to_handle.get(module, None)
1303
+ hook = functools.partial(
1304
+ _pre_forward, state, module_param_handle, _pre_forward_unshard
1305
+ )
1306
+ state._pre_forward_handles.append(
1307
+ module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True)
1308
+ )
1309
+
1310
+
1311
+ @no_type_check
1312
+ def _register_post_forward_hook(
1313
+ state: _FSDPState,
1314
+ module: nn.Module,
1315
+ ) -> None:
1316
+ """
1317
+ Registers a post-forward hook on ``module``. Even if the module has no
1318
+ handles, we should register the hook since it will register the module's
1319
+ pre-backward hook.
1320
+ """
1321
+ for forward_handle in state._post_forward_handles:
1322
+ forward_handle.remove()
1323
+ state._post_forward_handles.clear()
1324
+ module_param_handle = state._fully_sharded_module_to_handle.get(module, None)
1325
+ hook = functools.partial(
1326
+ _post_forward,
1327
+ state,
1328
+ module_param_handle,
1329
+ _post_forward_reshard,
1330
+ )
1331
+ state._post_forward_handles.append(module.register_forward_hook(hook))
1332
+
1333
+
1334
+ @no_type_check
1335
+ def _register_root_pre_forward_hook(
1336
+ state: _FSDPState,
1337
+ module: nn.Module,
1338
+ ):
1339
+ """
1340
+ Registers root pre-forward hook on ``module``, which should be the local
1341
+ FSDP root.
1342
+
1343
+ NOTE: For the current composable FSDP design, each application of
1344
+ ``fully_shard()`` to a module indicates that that module is the local
1345
+ FSDP root. We may remove this assumption in the future, in which case we
1346
+ will need to register this root pre-forward hook on any candidate module
1347
+ that may be the local FSDP root.
1348
+ """
1349
+ for forward_handle in state._root_pre_forward_handles:
1350
+ forward_handle.remove()
1351
+ state._root_pre_forward_handles.clear()
1352
+ hook = functools.partial(_root_pre_forward, state)
1353
+ state._root_pre_forward_handles.append(
1354
+ module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True)
1355
+ )
1356
+
1357
+
1358
+ @no_type_check
1359
+ def _register_pre_backward_hooks(
1360
+ state: _FSDPState,
1361
+ module: nn.Module,
1362
+ outputs: Any,
1363
+ handle: FlatParamHandle,
1364
+ ) -> None:
1365
+ """
1366
+ Registers pre-backward hooks on the tensors that require gradients in the
1367
+ forward pass outputs ``outputs``, which were computed using the
1368
+ ``FlatParameter`` of ``handle``.
1369
+
1370
+ Args:
1371
+ module (nn.Module): Fully sharded module (see [Note: Fully Sharded
1372
+ Module]).
1373
+
1374
+ Returns:
1375
+ Forward pass outputs with pre-backward hooks registered to tensors that
1376
+ require gradients.
1377
+ """
1378
+ # If there is no gradient computation, then there is no need for
1379
+ # pre-backward logic
1380
+ if not torch.is_grad_enabled():
1381
+ return outputs
1382
+ if state._is_root:
1383
+ state._post_backward_callback_queued = False # only defined on the root
1384
+
1385
+ if handle:
1386
+ handle._needs_pre_backward_unshard = False
1387
+ # Since these handles' `FlatParameter`s participated in a forward, we
1388
+ # conservatively assume that they will be used in the backward
1389
+ handle._ran_pre_backward_hook = False
1390
+
1391
+ def _register_hook(t: torch.Tensor) -> torch.Tensor:
1392
+ if t.requires_grad:
1393
+ t.register_hook(
1394
+ torch.utils.hooks.unserializable_hook(
1395
+ functools.partial(_pre_backward_hook, state, module, handle)
1396
+ )
1397
+ )
1398
+ if handle:
1399
+ handle._needs_pre_backward_unshard = True
1400
+ return t
1401
+
1402
+ return _apply_to_tensors(_register_hook, outputs)
1403
+
1404
+
1405
+ def _register_post_backward_hook(
1406
+ state: _FSDPState,
1407
+ handle: Optional[FlatParamHandle],
1408
+ ) -> None:
1409
+ """
1410
+ Registers post-backward hooks on the ``FlatParameter`` s'
1411
+ ``AccumulateGrad`` objects to reshard and to reduce-scatter gradients.
1412
+
1413
+ The ``AccumulateGrad`` object represents the last function that finalizes
1414
+ the ``FlatParameter`` 's gradient, so it only runs after its entire
1415
+ gradient computation has finished.
1416
+
1417
+ We register the post-backward hook only once in the *first* forward that a
1418
+ ``FlatParameter`` participates in. This relies on the ``AccumulateGrad``
1419
+ object being preserved through multiple forwards.
1420
+
1421
+ NOTE: We follow this heuristic to prefer the *first* forward to target the
1422
+ parameter mixed precision case, where there are *separate*
1423
+ ``AccumulateGrad`` objects across the different forwards. (Without
1424
+ parameter mixed precision, the ``AccumulateGrad`` objects are the same.) If
1425
+ we instead prefer the *last* forward, then the hook runs early.
1426
+ """
1427
+ # If there is no gradient computation, then there is no need for
1428
+ # post-backward logic
1429
+ if not torch.is_grad_enabled():
1430
+ return
1431
+ if not handle:
1432
+ return
1433
+ flat_param = handle.flat_param
1434
+
1435
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1436
+ already_registered = hasattr(flat_param, "_post_backward_hook_handle")
1437
+ if already_registered or not flat_param.requires_grad:
1438
+ return
1439
+ hook = functools.partial(_post_backward_hook, state, handle)
1440
+ hook_handle = flat_param.register_post_accumulate_grad_hook(hook)
1441
+ flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined]
1442
+ else:
1443
+ already_registered = hasattr(flat_param, "_post_backward_hook_state")
1444
+ if already_registered or not flat_param.requires_grad:
1445
+ return
1446
+ # Get the `AccumulateGrad` object
1447
+ temp_flat_param = flat_param.expand_as(flat_param)
1448
+ _p_assert(
1449
+ temp_flat_param.grad_fn is not None,
1450
+ "The `grad_fn` is needed to access the `AccumulateGrad` and "
1451
+ "register the post-backward hook",
1452
+ )
1453
+ acc_grad = temp_flat_param.grad_fn.next_functions[0][0] # type: ignore[union-attr]
1454
+ assert acc_grad is not None
1455
+ hook_handle = acc_grad.register_hook(
1456
+ functools.partial(_post_backward_hook, state, handle)
1457
+ )
1458
+ flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]
1459
+
1460
+
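The `expand_as` trick used above to reach the ``AccumulateGrad`` node can be demonstrated on a plain leaf tensor. The sketch below is a standalone illustration, not FSDP code: it registers a hook on the ``AccumulateGrad`` node of a leaf tensor, which fires once the leaf's gradient has been accumulated.

import torch

leaf = torch.randn(4, requires_grad=True)
# Expanding a leaf yields a non-leaf view whose grad_fn's next function is the
# leaf's AccumulateGrad node (the same access pattern as in the code above).
acc_grad = leaf.expand_as(leaf).grad_fn.next_functions[0][0]

def post_backward(*unused):
    # The leaf's gradient is expected to be accumulated by the time this fires,
    # which is what the post-backward hook above relies on.
    print("post-backward hook fired; leaf.grad set:", leaf.grad is not None)

hook_handle = acc_grad.register_hook(post_backward)
(leaf * 2.0).sum().backward()
hook_handle.remove()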
1461
+ def _register_post_backward_reshard_only_hook(
1462
+ state: _FSDPState,
1463
+ handle: Optional[FlatParamHandle],
1464
+ args: Tuple[Any, ...],
1465
+ kwargs: Dict[str, Any],
1466
+ ) -> None:
1467
+ """
1468
+ Registers post-backward hooks to reshard flat parameters that do not
1469
+ require gradient. We register these using multi-post-grad hooks on the
1470
+ input activations to ensure that all gradients that may depend on the
1471
+ parameters have been computed before resharding.
1472
+ """
1473
+ # If there is no gradient computation, then there is no need for
1474
+ # post-backward logic
1475
+ if not torch.is_grad_enabled():
1476
+ return
1477
+ # Construct `inp_tensors` lazily to avoid CPU overhead in typical case
1478
+ # where each flat parameter requires gradient
1479
+ inp_tensors: Optional[List[torch.Tensor]] = None
1480
+ if not handle:
1481
+ return
1482
+ flat_param = handle.flat_param
1483
+
1484
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1485
+ already_registered = hasattr(flat_param, "_post_backward_hook_handle")
1486
+ else:
1487
+ already_registered = hasattr(flat_param, "_post_backward_hook_state")
1488
+
1489
+ if already_registered or flat_param.requires_grad:
1490
+ return
1491
+ if inp_tensors is None:
1492
+ args_flat = pytree.arg_tree_leaves(*args, **kwargs)
1493
+ inp_tensors = [
1494
+ obj for obj in args_flat if torch.is_tensor(obj) and obj.requires_grad
1495
+ ]
1496
+ assert inp_tensors is not None # mypy
1497
+ hook_handle = register_multi_grad_hook(
1498
+ inp_tensors, functools.partial(_post_backward_reshard_only_hook, state, handle)
1499
+ )
1500
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1501
+ flat_param._post_backward_hook_handle = hook_handle # type: ignore[attr-defined, assignment]
1502
+ else:
1503
+ flat_param._post_backward_hook_state = (hook_handle,) # type: ignore[attr-defined, assignment]
1504
+
1505
+
1506
+ @no_type_check
1507
+ def _register_post_backward_final_callback(
1508
+ state: _FSDPState, module: nn.Module
1509
+ ) -> None:
1510
+ """
1511
+ Registers the post-backward final callback that runs at the end of the
1512
+ backward pass. This should be called from the root FSDP instance at the
1513
+ beginning of the pre-backward.
1514
+ """
1515
+ _p_assert(
1516
+ state._is_root,
1517
+ "Only the root FSDP instance should register the post-backward callback",
1518
+ )
1519
+ if state._post_backward_callback_queued:
1520
+ return
1521
+ _assert_in_training_states(state, [TrainingState.IDLE])
1522
+ # Trace does not need this callback
1523
+ if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
1524
+ state._post_backward_callback_queued = True
1525
+ Variable._execution_engine.queue_callback(
1526
+ functools.partial(_post_backward_final_callback, state, module)
1527
+ )
1528
+
1529
+
1530
+ def _wait_for_computation_stream(
1531
+ computation_stream: torch.Stream,
1532
+ unshard_stream: torch.Stream,
1533
+ pre_unshard_stream: torch.Stream,
1534
+ ):
1535
+ """
1536
+ Has the unshard and pre-unshard streams wait for the computation stream.
1537
+ For example, this should be called in the FSDP root's pre-forward to
1538
+ respect optimizer step computation.
1539
+ """
1540
+ # Tracing does not need to wait
1541
+ if torch.distributed._functional_collectives.is_torchdynamo_compiling():
1542
+ return
1543
+ unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined]
1544
+ # Having the pre-all-gather stream wait for the current stream even if we
1545
+ # do not leverage the pre-all-gather stream is tolerable since this only
1546
+ # runs once per iteration
1547
+ pre_unshard_stream.wait_stream(computation_stream) # type: ignore[attr-defined]
1548
+
1549
+
1550
+ def _reset_flat_param_grad_info_if_needed(
1551
+ handles: List[FlatParamHandle],
1552
+ ):
1553
+ """
1554
+ Clears the original parameters' gradients if needed. This method's CPU
1555
+ overhead is minimal, so we may call it throughout FSDP methods, which serve
1556
+ as callsites to free the gradient memory earlier.
1557
+ """
1558
+ if not isinstance(handles, list):
1559
+ handles = [handles]
1560
+ for handle in handles:
1561
+ if handle._use_orig_params:
1562
+ handle._reset_flat_param_grad_info_if_needed()
1563
+
1564
+
1565
+ @no_type_check
1566
+ def _get_buffers_and_dtypes_for_computation(
1567
+ state: _FSDPState,
1568
+ root_module: nn.Module,
1569
+ ) -> Tuple[List[torch.Tensor], List[Optional[torch.dtype]]]:
1570
+ """
1571
+ Returns all buffers in the module tree rooted at ``root_module`` and a
1572
+ corresponding list of the buffer dtypes for computation. Each buffer dtype
1573
+ is either ``None`` if buffer mixed precision is not enabled or the buffer
1574
+ low precision dtype otherwise.
1575
+ """
1576
+ _p_assert(state._is_root, "Expects the root to cast buffers")
1577
+ buffers: List[torch.Tensor] = []
1578
+ buffer_dtypes: List[Optional[torch.dtype]] = []
1579
+ visited_buffers: Set[torch.Tensor] = set()
1580
+ # Traverse the FSDP states bottom-up so that we prefer the owning FSDP
1581
+ # instance's mixed precision setting for each buffer
1582
+ fsdp_states, fsdp_modules = traversal_utils._get_fsdp_states_with_modules(
1583
+ root_module
1584
+ )
1585
+ for fsdp_state, fsdp_module in zip(reversed(fsdp_states), reversed(fsdp_modules)):
1586
+ for buffer_name, buffer in fsdp_module.named_buffers():
1587
+ if buffer in visited_buffers:
1588
+ continue
1589
+ visited_buffers.add(buffer)
1590
+ if clean_tensor_name(buffer_name) in fsdp_state._ignored_buffer_names:
1591
+ continue
1592
+ buffers.append(buffer)
1593
+ buffer_dtypes.append(fsdp_state.mixed_precision.buffer_dtype)
1594
+ assert len(buffers) == len(buffer_dtypes), f"{len(buffers)} {len(buffer_dtypes)}"
1595
+ return buffers, buffer_dtypes
1596
+
1597
+
1598
+ @no_type_check
1599
+ def _get_orig_buffer_dtypes(
1600
+ state: _FSDPState,
1601
+ buffer_names: List[str],
1602
+ ) -> List[torch.dtype]:
1603
+ """
1604
+ Returns the original buffer types of the given buffer names.
1605
+ """
1606
+ buffer_dtypes: List[torch.dtype] = []
1607
+ for buffer_name in buffer_names:
1608
+ _p_assert(
1609
+ buffer_name in state._buffer_name_to_orig_dtype,
1610
+ f"{buffer_name} is missing from pre-computed dict on rank "
1611
+ f"{state.rank}, which only has keys "
1612
+ f"{state._buffer_name_to_orig_dtype.keys()}",
1613
+ )
1614
+ buffer_dtypes.append(state._buffer_name_to_orig_dtype[buffer_name])
1615
+ return buffer_dtypes
1616
+
1617
+
1618
+ def _cast_buffers_to_dtype_and_device(
1619
+ buffers: List[torch.Tensor],
1620
+ buffer_dtypes: List[Optional[torch.dtype]],
1621
+ device: torch.device,
1622
+ ) -> None:
1623
+ """
1624
+ Casts ``buffers`` to the dtypes given by ``buffer_dtypes`` and moves them
1625
+ to ``device``. If an element in ``buffer_dtypes`` is ``None``, then the
1626
+ corresponding buffer is only moved to ``device``.
1627
+ """
1628
+ _p_assert(
1629
+ buffer_dtypes is None or len(buffers) == len(buffer_dtypes),
1630
+ f"Expects `buffers` and `buffer_dtypes` to have the same length if "
1631
+ f"`buffer_dtypes` is specified but got {len(buffers)} and "
1632
+ f"{len(buffer_dtypes)}",
1633
+ )
1634
+ for buffer, buffer_dtype in zip(buffers, buffer_dtypes):
1635
+ if not torch.is_floating_point(buffer) or buffer_dtype is None:
1636
+ buffer.data = buffer.to(device=device)
1637
+ else:
1638
+ buffer.data = buffer.to(device=device, dtype=buffer_dtype)
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_shard_utils.py ADDED
@@ -0,0 +1,137 @@
1
+ # mypy: allow-untyped-defs
2
+ import copy
3
+ import itertools
4
+ import math
5
+ from typing import Optional
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ from torch._utils import _get_device_module
10
+ from torch.distributed import distributed_c10d
11
+ from torch.distributed._shard.sharded_tensor import (
12
+ Shard,
13
+ ShardedTensor,
14
+ ShardedTensorMetadata,
15
+ TensorProperties,
16
+ )
17
+ from torch.distributed._shard.sharding_spec import ShardMetadata
18
+ from torch.distributed.tensor import DeviceMesh, DTensor, Replicate, Shard as DShard
19
+
20
+
21
+ def _get_remote_device_str(rank, device_type, num_devices_per_node):
22
+ if device_type.lower() == "cpu":
23
+ return f"rank:{rank}/{device_type}"
24
+ elif device_type.lower() == "hpu":
25
+ return f"rank:{rank}/{device_type}:{_get_device_module(device_type).current_device()}"
26
+ else:
27
+ return f"rank:{rank}/{device_type}:{rank % num_devices_per_node}"
28
+
29
+
30
+ def _create_chunk_sharded_tensor(
31
+ tensor: torch.Tensor,
32
+ rank: int,
33
+ world_size: int,
34
+ num_devices_per_node: int,
35
+ pg: dist.ProcessGroup,
36
+ device: Optional[torch.device] = None,
37
+ ) -> ShardedTensor:
38
+ """
39
+ Shard a tensor into chunks along the first dimension. The local rank gets its
40
+ corresponding chunk as the local shard to create a ShardedTensor.
41
+ """
42
+ chunks = tensor.chunk(world_size, dim=0)
43
+ if len(chunks) > rank:
44
+ local_shard = chunks[rank].clone()
45
+ offsets = [0 for _ in tensor.size()]
46
+ offsets[0] = math.ceil(tensor.size()[0] / world_size) * rank
47
+ local_shards = [Shard.from_tensor_and_offsets(local_shard, offsets, rank)]
48
+ else:
49
+ local_shards = []
50
+
51
+ # Create a ShardedTensor without invoking communication.
52
+ chunk_sizes = [list(chunk.size()) for chunk in chunks]
53
+ dim0_offsets = [0] + list(
54
+ itertools.accumulate([chunk_size[0] for chunk_size in chunk_sizes])
55
+ )[:-1]
56
+ offsets = [0] * (len(chunk_sizes[0]) - 1)
57
+ chunk_offsets = [[d0] + offsets for d0 in dim0_offsets]
58
+ device_type = (
59
+ distributed_c10d._get_pg_default_device(pg).type
60
+ if device is None
61
+ else device.type
62
+ )
63
+ placements = [
64
+ _get_remote_device_str(
65
+ dist.get_global_rank(pg, r),
66
+ device_type,
67
+ num_devices_per_node,
68
+ )
69
+ for r in range(len(chunk_sizes))
70
+ ]
71
+ assert len(chunk_sizes) == len(chunk_offsets) == len(placements)
72
+ shard_metadata = [
73
+ ShardMetadata(offset, size, placement)
74
+ for offset, size, placement in zip(chunk_offsets, chunk_sizes, placements)
75
+ ]
76
+ sharded_tensor_metadata = ShardedTensorMetadata(
77
+ shards_metadata=shard_metadata,
78
+ size=tensor.size(),
79
+ tensor_properties=TensorProperties(
80
+ dtype=tensor.dtype,
81
+ layout=tensor.layout,
82
+ requires_grad=False,
83
+ memory_format=torch.contiguous_format,
84
+ pin_memory=tensor.is_pinned(),
85
+ ),
86
+ )
87
+ return ShardedTensor._init_from_local_shards_and_global_metadata(
88
+ local_shards, sharded_tensor_metadata=sharded_tensor_metadata, process_group=pg
89
+ )
90
+
91
+
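The chunk-size and offset bookkeeping above can be exercised without any process group. Below is a small standalone sketch of the same dim-0 chunking math; `world_size` and `rank` are stand-in values.

import itertools
import math
import torch

tensor = torch.arange(20).reshape(10, 2)
world_size, rank = 4, 2

chunks = tensor.chunk(world_size, dim=0)                      # dim-0 sizes [3, 3, 3, 1]
local_shard = chunks[rank].clone()
rank_offset = math.ceil(tensor.size(0) / world_size) * rank   # ceil(10/4) * 2 == 6

chunk_sizes = [list(chunk.size()) for chunk in chunks]
dim0_offsets = [0] + list(
    itertools.accumulate(size[0] for size in chunk_sizes)
)[:-1]                                                        # [0, 3, 6, 9]
assert dim0_offsets[rank] == rank_offset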
92
+ def _create_chunk_dtensor(
93
+ tensor: torch.Tensor,
94
+ rank: int,
95
+ device_mesh: DeviceMesh,
96
+ ) -> DTensor:
97
+ Shard a tensor into chunks along the first dimension. The local rank gets its
98
+ Shard a tensor to chunks along the first dimension. The local rank will gets its
99
+ corresponding chunk as the local tensor to create a DTensor.
100
+ """
101
+ # We need to explicitly call .detach() to return a new tensor detached from the current graph.
102
+ tensor = tensor.clone().detach()
103
+
104
+ # FSDP placements: [Shard(0)]
105
+ # HSDP placements: [Replicate(), Shard(0)]
106
+ replicate_placements = [Replicate() for _ in range(device_mesh.ndim)]
107
+ shard_placements = [Replicate() for _ in range(device_mesh.ndim)]
108
+ shard_placements[-1] = DShard(0) # type: ignore[call-overload]
109
+
110
+ return DTensor.from_local(
111
+ tensor, device_mesh, replicate_placements, run_check=False
112
+ ).redistribute(
113
+ placements=shard_placements,
114
+ )
115
+
116
+
117
+ def _all_gather_dtensor(
118
+ tensor: DTensor,
119
+ root_mesh: Optional[DeviceMesh],
120
+ ) -> torch.Tensor:
121
+ """
122
+ All gather a DTensor in its sharded dimension and return the local tensor.
123
+ """
124
+ assert (
125
+ root_mesh == tensor.device_mesh
126
+ ), "The device mesh of a tensor should be a root mesh."
127
+
128
+ placements = list(copy.deepcopy(tensor.placements))
129
+ # FSDP placements: [Shard(0)] -> [Replicate()]
130
+ # HSDP placements: [Replicate(), Shard(0)] -> [Replicate(), Replicate()]
131
+ placements[-1] = Replicate()
132
+ tensor = tensor.redistribute(
133
+ device_mesh=tensor.device_mesh,
134
+ placements=placements,
135
+ )
136
+
137
+ return tensor.to_local()
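As a small aside on the placement lists built in `_create_chunk_dtensor` and reversed in `_all_gather_dtensor`: they can be constructed without a device mesh or process group, since the placement objects are plain descriptors. A hedged standalone sketch:

from torch.distributed.tensor import Replicate, Shard

def chunk_placements(mesh_ndim: int):
    # FSDP (1-D mesh): [Shard(0)]; HSDP (2-D mesh): [Replicate(), Shard(0)]
    placements = [Replicate() for _ in range(mesh_ndim)]
    placements[-1] = Shard(0)
    return placements

print(chunk_placements(1))   # sharded along dim 0 on the only mesh dim
print(chunk_placements(2))   # replicated across nodes, sharded within a node

# The all-gather direction swaps the trailing Shard(0) back to Replicate().
gathered_placements = chunk_placements(2)
gathered_placements[-1] = Replicate()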
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_state_dict_utils.py ADDED
@@ -0,0 +1,924 @@
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import logging
4
+ import math
5
+ import warnings
6
+ from typing import (
7
+ Any,
8
+ Callable,
9
+ cast,
10
+ Dict,
11
+ Generator,
12
+ Iterator,
13
+ List,
14
+ no_type_check,
15
+ Tuple,
16
+ )
17
+
18
+ import torch
19
+ import torch.distributed as dist
20
+ import torch.distributed.algorithms._checkpoint.checkpoint_wrapper as checkpoint_wrapper
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ from torch.distributed._shard.sharded_tensor import (
24
+ init_from_local_shards,
25
+ Shard,
26
+ ShardedTensor,
27
+ )
28
+ from torch.distributed.device_mesh import _mesh_resources
29
+ from torch.distributed.fsdp._common_utils import (
30
+ _FSDPState,
31
+ _get_module_fsdp_state_if_fully_sharded_module,
32
+ _has_fsdp_params,
33
+ _is_composable,
34
+ _module_handle,
35
+ clean_tensor_name,
36
+ FSDP_PREFIX,
37
+ FSDP_WRAPPED_MODULE,
38
+ )
39
+ from torch.distributed.fsdp._debug_utils import SimpleProfiler
40
+ from torch.distributed.fsdp._runtime_utils import (
41
+ _cast_buffers_to_dtype_and_device,
42
+ _get_orig_buffer_dtypes,
43
+ _lazy_init,
44
+ _reset_flat_param_grad_info_if_needed,
45
+ )
46
+ from torch.distributed.fsdp.api import (
47
+ FullStateDictConfig,
48
+ ShardingStrategy,
49
+ StateDictType,
50
+ )
51
+ from torch.distributed.tensor import DTensor
52
+ from torch.distributed.utils import _replace_by_prefix
53
+
54
+ from ._fsdp_extensions import (
55
+ _ext_all_gather_dtensor,
56
+ _ext_chunk_dtensor,
57
+ _ext_chunk_tensor,
58
+ _ext_post_unflatten_transform,
59
+ _ext_pre_load_state_dict_transform,
60
+ )
61
+ from ._unshard_param_utils import _unshard_fsdp_state_params, FLAT_PARAM
62
+
63
+
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ def _should_unshard_params(fsdp_state: _FSDPState) -> bool:
68
+ return not (
69
+ fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
70
+ and (_is_composable(fsdp_state) or fsdp_state._use_orig_params)
71
+ )
72
+
73
+
74
+ def _convert_to_wrapped_module_name(module_name: str) -> str:
75
+ module_name = module_name.replace(f"{FSDP_PREFIX}", "")
76
+ module_name = module_name.replace(f"{FSDP_WRAPPED_MODULE}", "")
77
+ if module_name:
78
+ module_name = f"{module_name}."
79
+ # `CheckpointWrapper` adds a prefix that has to be removed as well.
80
+ module_name = module_name.replace(checkpoint_wrapper._CHECKPOINT_PREFIX, "")
81
+ return module_name
82
+
83
+
84
+ def _param_name_infos(
85
+ module: nn.Module, fsdp_state: _FSDPState
86
+ ) -> Iterator[Tuple[str, str, str]]:
87
+ if not _has_fsdp_params(fsdp_state, module):
88
+ return
89
+ for param_name, module_name in _module_handle(
90
+ fsdp_state, module
91
+ ).param_module_names():
92
+ module_name = _convert_to_wrapped_module_name(module_name)
93
+ fqn = f"{module_name}{param_name}"
94
+ yield fqn, param_name, module_name
95
+
96
+
97
+ def _shared_param_name_infos(
98
+ module: nn.Module, fsdp_state
99
+ ) -> Iterator[Tuple[str, str, str]]:
100
+ for param_name, module_name in _module_handle(
101
+ fsdp_state, module
102
+ ).shared_param_module_names():
103
+ module_name = _convert_to_wrapped_module_name(module_name)
104
+ fqn = f"{module_name}{param_name}"
105
+ yield fqn, param_name, module_name
106
+
107
+
108
+ @no_type_check
109
+ def _enter_unshard_params_ctx(
110
+ module: nn.Module,
111
+ fsdp_state: _FSDPState,
112
+ writeback: bool = False,
113
+ rank0_only: bool = False,
114
+ offload_to_cpu: bool = False,
115
+ with_grads: bool = False,
116
+ ) -> None:
117
+ """
118
+ state_dict hooks cannot use the pure context call as the checkpoint flow
119
+ requires to enter the context in the pre-hook but leave the context in the
120
+ post-hook. This API enters the context of ``_unshard_fsdp_state_params``.
121
+ """
122
+ assert module not in fsdp_state._unshard_params_ctx, (
123
+ "Entering the ``_unshard_fsdp_state_params`` context but _unshard_params_ctx[module] "
124
+ "is not None."
125
+ )
126
+ fsdp_state._unshard_params_ctx[module] = _unshard_fsdp_state_params(
127
+ module,
128
+ fsdp_state,
129
+ writeback=writeback,
130
+ rank0_only=rank0_only,
131
+ offload_to_cpu=offload_to_cpu,
132
+ with_grads=with_grads,
133
+ )
134
+ fsdp_state._unshard_params_ctx[module].__enter__()
135
+
136
+
137
+ @no_type_check
138
+ def _exit_unshard_params_ctx(module: nn.Module, fsdp_state: _FSDPState) -> None:
139
+ """A helper function to exit ``_unshard_fsdp_state_params`` context."""
140
+ fsdp_state._unshard_params_ctx[module].__exit__(None, None, None)
141
+ fsdp_state._unshard_params_ctx.pop(module)
142
+
143
+
144
+ def _common_pre_state_dict_hook(
145
+ module: nn.Module,
146
+ fsdp_state: _FSDPState,
147
+ ) -> None:
148
+ """Performs the pre-state_dict tasks shared by all state_dict types."""
149
+ if fsdp_state._device_handle.is_available():
150
+ fsdp_state._device_handle.synchronize()
151
+ # TODO: need to check if this is always correct for composable FSDP.
152
+ _lazy_init(fsdp_state, module)
153
+ if fsdp_state._is_root:
154
+ _reset_flat_param_grad_info_if_needed(fsdp_state._all_handles)
155
+
156
+
157
+ def _common_unshard_pre_state_dict_hook(
158
+ module: nn.Module,
159
+ fsdp_state: _FSDPState,
160
+ offload_to_cpu: bool,
161
+ rank0_only: bool,
162
+ ) -> None:
163
+ """
164
+ Performs the pre-state_dict tasks shared by all state_dict types that require
165
+ ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this hook.
166
+ """
167
+ # For composable `fully_shard`, it does not need to unshard parameters for `NO_SHARD` cases.
168
+ if not _should_unshard_params(fsdp_state):
169
+ return
170
+ _enter_unshard_params_ctx(
171
+ module,
172
+ fsdp_state,
173
+ writeback=False,
174
+ offload_to_cpu=offload_to_cpu,
175
+ rank0_only=rank0_only,
176
+ )
177
+
178
+
179
+ @no_type_check
180
+ def _common_unshard_post_state_dict_hook(
181
+ module: nn.Module,
182
+ fsdp_state: _FSDPState,
183
+ state_dict: Dict[str, Any],
184
+ prefix: str,
185
+ param_hook: Callable,
186
+ ) -> Dict[str, Any]:
187
+ """
188
+ The post-state_dict flow that shared by all state_dict types that require
189
+ ``_unshard_fsdp_state_params()``. FULL_STATE_DICT and SHARDED_STATE_DICT use this
190
+ hook.
191
+ """
192
+ _replace_by_prefix(state_dict, prefix + f"{FSDP_PREFIX}", prefix)
193
+ # Return early for trivial cases
194
+ if not state_dict or not _has_fsdp_params(fsdp_state, module):
195
+ if _should_unshard_params(fsdp_state):
196
+ _exit_unshard_params_ctx(module, fsdp_state)
197
+ return state_dict
198
+
199
+ # If a rank does not have unsharded parameters(when `rank0_only=True`
200
+ # and `rank != 0`), then the rank only needed to participate in the
201
+ # all-gather and does not need to save the # state dict. We simply check
202
+ # rank0_only to ensure this issue.
203
+ rank0_only = (
204
+ fsdp_state._state_dict_type == StateDictType.FULL_STATE_DICT
205
+ and cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only
206
+ )
207
+ # no_fsdp_return means the state_dict returned by this rank should contain
208
+ # only non-FSDP controlled parameters and buffers.
209
+ no_fsdp_return = rank0_only and fsdp_state.rank != 0
210
+ if no_fsdp_return and not fsdp_state._use_orig_params:
211
+ for clean_key in fsdp_state._buffer_names:
212
+ # This is a hack to support activation checkpoint.
213
+ clean_key = clean_key.replace(
214
+ f"{checkpoint_wrapper._CHECKPOINT_PREFIX}.", ""
215
+ )
216
+ state_dict.pop(f"{prefix}{clean_key}", None)
217
+ # Non-zero ranks have flat_param key when rank0_only=True, because rank0_only=True is
218
+ # passed in to unshard context, but nonzero ranks reshard early, causing this flat_param
219
+ # to appear in state_dict.
220
+ state_dict.pop(f"{prefix}{FLAT_PARAM}")
221
+ _exit_unshard_params_ctx(module, fsdp_state)
222
+ return state_dict
223
+
224
+ # Loop only the parameters saved in this instance's wrapped module to
225
+ # avoid processing buffers.
226
+ for fqn, param_name, module_name in _param_name_infos(module, fsdp_state):
227
+ fqn = f"{prefix}{fqn}"
228
+ if no_fsdp_return:
229
+ state_dict.pop(fqn)
230
+ continue
231
+ assert fqn in state_dict, (
232
+ f"FSDP assumes {fqn} is in the state_dict but the state_dict only "
233
+ f"has {state_dict.keys()}. "
234
+ f"prefix={prefix}, module_name={module_name}, "
235
+ f"param_name={param_name} rank={fsdp_state.rank}."
236
+ )
237
+
238
+ param_hook(state_dict, prefix, fqn)
239
+
240
+ if _should_unshard_params(fsdp_state):
241
+ _exit_unshard_params_ctx(module, fsdp_state)
242
+
243
+ cpu_device = torch.device("cpu")
244
+ buffer_clean_fqns = []
245
+ buffers = []
246
+ for clean_key in fsdp_state._buffer_names:
247
+ # This is a hack to support activation checkpoint.
248
+ clean_key = clean_tensor_name(clean_key)
249
+ fqn = f"{prefix}{clean_key}"
250
+ if fqn not in state_dict:
251
+ # A buffer can be registered as non-persistent.
252
+ continue
253
+ if no_fsdp_return:
254
+ state_dict.pop(fqn)
255
+ else:
256
+ buffer = state_dict[fqn]
257
+ if (
258
+ fsdp_state._state_dict_config.offload_to_cpu
259
+ and buffer.device != cpu_device
260
+ ):
261
+ state_dict[fqn] = buffer.to(cpu_device)
262
+ # skip upcasting for ignored buffers
263
+ if clean_key not in fsdp_state._ignored_buffer_names:
264
+ buffer_clean_fqns.append(clean_key)
265
+ buffers.append(state_dict[fqn])
266
+
267
+ if buffers:
268
+ mixed_precision_enabled_for_buffers = (
269
+ fsdp_state._mixed_precision_enabled_for_buffers()
270
+ if not _is_composable(fsdp_state)
271
+ else (fsdp_state.mixed_precision.buffer_dtype is not None)
272
+ )
273
+ if mixed_precision_enabled_for_buffers:
274
+ buffer_dtypes = _get_orig_buffer_dtypes(fsdp_state, buffer_clean_fqns)
275
+ _cast_buffers_to_dtype_and_device(
276
+ buffers, buffer_dtypes, fsdp_state.compute_device
277
+ )
278
+ for buffer, clean_fqn in zip(buffers, buffer_clean_fqns):
279
+ fqn = f"{prefix}{clean_fqn}"
280
+ logger.info("FSDP is casting the dtype of %s to %s", fqn, buffer.dtype)
281
+ state_dict[fqn] = buffer.clone()
282
+ return state_dict
283
+
284
+
285
+ @no_type_check
286
+ def _full_pre_state_dict_hook(
287
+ fsdp_state: _FSDPState,
288
+ module: nn.Module,
289
+ *args,
290
+ **kwargs,
291
+ ) -> None:
292
+ """
293
+ Hook that runs before model.state_dict() is called. pre-state_dict hook is
294
+ not actually supported by ``nn.Module``. As a result, this API is called
295
+ from ``_full_post_state_dict_hook()`` to simulate the case. Once pre-state_dict
296
+ is supported in ``nn.Module``, this hook will be registered as a hook in
297
+ ``nn.Module``.
298
+ """
299
+ if getattr(fsdp_state, "_device_mesh", False):
300
+ root_mesh = _mesh_resources.get_root_mesh(fsdp_state._device_mesh)
301
+
302
+ _common_pre_state_dict_hook(module, fsdp_state)
303
+ _common_unshard_pre_state_dict_hook(
304
+ module,
305
+ fsdp_state,
306
+ offload_to_cpu=fsdp_state._state_dict_config.offload_to_cpu,
307
+ rank0_only=cast(FullStateDictConfig, fsdp_state._state_dict_config).rank0_only,
308
+ )
309
+
310
+
311
+ @no_type_check
312
+ def _full_post_state_dict_hook(
313
+ module: nn.Module,
314
+ fsdp_state: _FSDPState,
315
+ state_dict: Dict[str, Any],
316
+ prefix: str,
317
+ ) -> Dict[str, Any]:
318
+ """
319
+ Hook that runs after model.state_dict() is called before returning result to
320
+ user. For FSDP, we may have to clone the tensors in state_dict as params go
321
+ back to sharded version after _unshard_fsdp_state_params ends, and also remove
322
+ the ``FSDP_WRAPPED_MODULE`` prefix.
323
+ """
324
+
325
+ def param_hook(
326
+ state_dict: Dict[str, Any],
327
+ prefix: str,
328
+ fqn: str,
329
+ ) -> None:
330
+ clean_key = fqn
331
+ clean_prefix = clean_tensor_name(prefix)
332
+ # Strip prefix out of key if needed as buffer names and param names
333
+ # do not have prefix considered as they are not computed in `state_dict`
334
+ # call.
335
+ if clean_key.startswith(clean_prefix):
336
+ clean_key = clean_key[len(clean_prefix) :]
337
+
338
+ # Clone parameters before exiting the `_unshard_fsdp_state_params()` context.
339
+ if not getattr(state_dict[fqn], "_has_been_cloned", False):
340
+ try:
341
+ state_dict[fqn] = state_dict[fqn].clone().detach()
342
+ state_dict[fqn]._has_been_cloned = True # type: ignore[attr-defined]
343
+ except BaseException as e:
344
+ warnings.warn(
345
+ f"Failed to clone() tensor with name {fqn} on rank {fsdp_state.rank}. "
346
+ "This may mean that this state_dict entry could point to invalid "
347
+ "memory regions after returning from state_dict() call if this "
348
+ "parameter is managed by FSDP. Please check clone "
349
+ f"implementation of {fqn}. Error: {str(e)}"
350
+ )
351
+
352
+ return _common_unshard_post_state_dict_hook(
353
+ module, fsdp_state, state_dict, prefix, param_hook
354
+ )
355
+
356
+
357
+ def _full_pre_load_state_dict_hook(
358
+ module: nn.Module,
359
+ fsdp_state: _FSDPState,
360
+ state_dict: Dict[str, Any],
361
+ prefix: str,
362
+ ) -> None:
363
+ _lazy_init(fsdp_state, module)
364
+ if _should_unshard_params(fsdp_state):
365
+ with SimpleProfiler.profile("_enter_unshard_params_ctx"):
366
+ _enter_unshard_params_ctx(module, fsdp_state, writeback=True)
367
+ # Add FSDP_PREFIX only for wrapper-based FSDP.
368
+ if not _is_composable(fsdp_state):
369
+ _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}")
370
+
371
+
372
+ def _full_post_load_state_dict_hook(
373
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
374
+ ) -> None:
375
+ if _should_unshard_params(fsdp_state):
376
+ with SimpleProfiler.profile("_exit_unshard_params_ctx"):
377
+ _exit_unshard_params_ctx(module, fsdp_state)
378
+
379
+
380
+ def _local_pre_state_dict_hook(
381
+ fsdp_state: _FSDPState,
382
+ module: nn.Module,
383
+ *args,
384
+ **kwargs,
385
+ ) -> None:
386
+ """
387
+ Hook that runs before model.state_dict() is called. Right now, pre-state_dict
388
+ hook is not supported by the PyTorch core. So this API is called from
389
+ `_local_post_state_dict_hook()` to simulate the case.
390
+ """
391
+ if (
392
+ _has_fsdp_params(fsdp_state, module)
393
+ and not _module_handle(fsdp_state, module).uses_sharded_strategy
394
+ ):
395
+ raise RuntimeError(
396
+ "``local_state_dict`` can only be used when parameters are flatten "
397
+ "and sharded."
398
+ )
399
+ _common_pre_state_dict_hook(module, fsdp_state)
400
+
401
+
402
+ @no_type_check
403
+ def _local_post_state_dict_hook(
404
+ module: nn.Module,
405
+ fsdp_state: _FSDPState,
406
+ state_dict: Dict[str, Any],
407
+ prefix: str,
408
+ ) -> Dict[str, Any]:
409
+ """
410
+ This hook create a ShardedTensor from the local flat_param and replace
411
+ the state_dict[f"{prefix}{FLAT_PARAM}] with the ShardedTensor. No copy
412
+ will happen. The underlying storage is the same.
413
+ """
414
+
415
+ _replace_by_prefix(state_dict, f"{prefix}{FSDP_PREFIX}", prefix)
416
+ if not _has_fsdp_params(fsdp_state, module):
417
+ return state_dict
418
+
419
+ # state_dict[f"{prefix}{FLAT_PARAM}"] exists and has the same tensor
420
+ # value as the flat_param but it is a pure Tensor because
421
+ # nn.Module.state_dict() will detach the parameter. Therefore, we need
422
+ # to get flat_param to get the metadata.
423
+ assert _module_handle(fsdp_state, module), "Should have returned early"
424
+ flat_param = _module_handle(fsdp_state, module).flat_param
425
+ # Constructs a ShardedTensor from the flat_param "without" padding.
426
+ # Removing the padding allows users to change the number of ranks
427
+ # when loading the local_state_dict.
428
+ full_numel = flat_param._unpadded_unsharded_size.numel() # type: ignore[attr-defined]
429
+ shard_offset = flat_param.numel() * fsdp_state.rank
430
+ valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
431
+ if valid_data_size > 0:
432
+ # If FlatParameter is returned, FlatParameter._local_shard cause a
433
+ # pickling issue (can be torch.save but not torch.load). Since there
434
+ # is no benefit for state_dict to return the actual FlatParameter class,
435
+ # a view (which is a tensor) of the FlatParameter will be returned.
436
+ flat_param = flat_param[:valid_data_size].view(valid_data_size)
437
+ local_shards = [
438
+ Shard.from_tensor_and_offsets(flat_param, [shard_offset], fsdp_state.rank)
439
+ ]
440
+ else:
441
+ local_shards = []
442
+ sharded_tensor = init_from_local_shards(
443
+ local_shards, full_numel, process_group=fsdp_state.process_group
444
+ ) # type: ignore[assignment]
445
+ # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT.
446
+ if fsdp_state._state_dict_config.offload_to_cpu:
447
+ sharded_tensor = sharded_tensor.cpu()
448
+ state_dict[f"{prefix}{FLAT_PARAM}"] = sharded_tensor
449
+ return state_dict
450
+
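+ # Illustrative helper, not part of the upstream API: it mirrors how the hook
+ # above wraps a rank's 1-D local shard into a ShardedTensor without copying.
+ # ``pg`` must be an initialized process group; assumes no shard padding.
+ def _example_build_sharded_flat_param(local_shard, rank, world_size, pg):
+     # Each rank owns a contiguous slice of the flat parameter.
+     offset = local_shard.numel() * rank
+     shard = Shard.from_tensor_and_offsets(local_shard, [offset], rank)
+     # The global size is the concatenation of all per-rank shards.
+     return init_from_local_shards(
+         [shard], local_shard.numel() * world_size, process_group=pg
+     )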
451
+
452
+ def _local_post_load_state_dict_hook(
453
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
454
+ ) -> None:
455
+ pass
456
+
457
+
458
+ def _local_pre_load_state_dict_hook(
459
+ module: nn.Module,
460
+ fsdp_state: _FSDPState,
461
+ state_dict: Dict[str, Any],
462
+ prefix: str,
463
+ ) -> None:
464
+ """
465
+ This hook finds the local flat_param for this FSDP module from the
466
+ state_dict. The flat_param should be a ShardedTensor. This hook converts
467
+ the ShardedTensor to a tensor. No copy happens unless padding is required.
468
+ """
469
+ _lazy_init(fsdp_state, module)
470
+ _replace_by_prefix(state_dict, prefix, f"{prefix}{FSDP_PREFIX}")
471
+ fqn = f"{prefix}{FSDP_PREFIX}{FLAT_PARAM}"
472
+ if fqn not in state_dict:
473
+ assert not _has_fsdp_params(fsdp_state, module), (
474
+ "No `FlatParameter` in `state_dict` for this FSDP instance "
475
+ "but it has parameters"
476
+ )
477
+ return
478
+ load_tensor = state_dict[fqn]
479
+ assert isinstance(
480
+ load_tensor, ShardedTensor
481
+ ), "Tensors in local_state_dict should be ShardedTensor."
482
+
483
+ # Convert the ShardedTensor to a Tensor.
484
+ flat_param = _module_handle(fsdp_state, module).flat_param
485
+ assert flat_param is not None
486
+ valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
487
+ shards = load_tensor.local_shards()
488
+ if valid_data_size > 0:
489
+ assert len(shards), "load_local_state_dict assumes one shard per ShardedTensor."
490
+ load_tensor = shards[0].tensor
491
+
492
+ # Get the metadata of the flat_param to decide whether to pad the loaded
493
+ # tensor.
494
+ if flat_param._shard_numel_padded > 0:
495
+ assert load_tensor.numel() < flat_param.numel(), (
496
+ f"Local shard size = {flat_param.numel()} and the tensor in "
497
+ f"the state_dict is {load_tensor.numel()}."
498
+ )
499
+ load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])
500
+ else:
501
+ load_tensor = flat_param
502
+ # TODO: Add DTensor state_dict support for LOCAL_STATE_DICT.
503
+ state_dict[fqn] = load_tensor
504
+
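+ # Worked example for the padding step above (illustrative numbers): if this
+ # rank's flat_param holds 8 elements with ``_shard_numel_padded == 3``, then the
+ # checkpoint shard holds 5 elements, and ``F.pad(load_tensor, [0, 3])`` appends
+ # 3 zeros on the right so the loaded tensor matches the padded shard size of 8.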
505
+
506
+ def _sharded_pre_state_dict_hook(
507
+ fsdp_state: _FSDPState,
508
+ module: nn.Module,
509
+ *args,
510
+ **kwargs,
511
+ ) -> None:
512
+ """
513
+ Hook that runs before model.state_dict() is called. Check
514
+ ``_full_pre_load_state_dict_hook`` for details.
515
+ """
516
+ if (
517
+ _has_fsdp_params(fsdp_state, module)
518
+ and not _module_handle(fsdp_state, module).uses_sharded_strategy
519
+ ):
520
+ raise RuntimeError(
521
+ "``sharded_state_dict`` can only be used when parameters are flattened "
522
+ "and sharded."
523
+ )
524
+ _common_pre_state_dict_hook(module, fsdp_state)
525
+ # Setting offload_to_cpu here does not work even if offload_to_cpu is True.
526
+ # We have to create ShardedTensor first then move it to CPU.
527
+ _common_unshard_pre_state_dict_hook(
528
+ module,
529
+ fsdp_state,
530
+ offload_to_cpu=False,
531
+ rank0_only=False,
532
+ )
533
+
534
+
535
+ @no_type_check
536
+ def _sharded_post_state_dict_hook(
537
+ module: nn.Module,
538
+ fsdp_state: _FSDPState,
539
+ state_dict: Dict[str, Any],
540
+ prefix: str,
541
+ ) -> Dict[str, Any]:
542
+ """
543
+ The hook replaces the unflattened, unsharded parameter in the state_dict
544
+ with an unflattened, sharded parameter (a ShardedTensor).
545
+ """
546
+
547
+ def param_hook(state_dict: Dict[str, Any], prefix: str, fqn: str):
548
+ param = state_dict[fqn]
549
+ if not fsdp_state._state_dict_config._use_dtensor:
550
+ sharded_tensor = _ext_chunk_tensor(
551
+ tensor=param,
552
+ rank=fsdp_state.rank,
553
+ world_size=fsdp_state.world_size,
554
+ num_devices_per_node=fsdp_state._device_handle.device_count(),
555
+ pg=fsdp_state.process_group,
556
+ fsdp_extension=fsdp_state._fsdp_extension,
557
+ )
558
+ else:
559
+ sharded_tensor = _ext_chunk_dtensor(
560
+ tensor=param,
561
+ rank=fsdp_state.rank,
562
+ device_mesh=fsdp_state._device_mesh,
563
+ fsdp_extension=fsdp_state._fsdp_extension,
564
+ )
565
+ if fsdp_state._state_dict_config.offload_to_cpu:
566
+ sharded_tensor = sharded_tensor.cpu()
567
+ state_dict[fqn] = sharded_tensor
568
+
569
+ return _common_unshard_post_state_dict_hook(
570
+ module, fsdp_state, state_dict, prefix, param_hook
571
+ )
572
+
573
+
574
+ @no_type_check
575
+ def _sharded_post_load_state_dict_hook(
576
+ module: nn.Module, fsdp_state: _FSDPState, *args, **kwargs
577
+ ) -> None:
578
+ if _has_fsdp_params(fsdp_state, module):
579
+ with SimpleProfiler.profile("_exit_unshard_params_ctx"):
580
+ _exit_unshard_params_ctx(module, fsdp_state)
581
+
582
+
583
+ @no_type_check
584
+ def _sharded_pre_load_state_dict_hook(
585
+ module: nn.Module,
586
+ fsdp_state: _FSDPState,
587
+ state_dict: Dict[str, Any],
588
+ prefix: str,
589
+ ) -> None:
590
+ """
591
+ The hook combines the unflattened, sharded parameters (ShardedTensor) into
592
+ a new FlatParameter and shards the new FlatParameter into the local chunk.
593
+ """
594
+ _lazy_init(fsdp_state, module)
595
+ if not _is_composable(fsdp_state):
596
+ _replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_PREFIX}")
597
+ if not _has_fsdp_params(fsdp_state, module):
598
+ return
599
+
600
+ handle = _module_handle(fsdp_state, module)
601
+ if not handle.uses_sharded_strategy:
602
+ raise RuntimeError(
603
+ "load_sharded_state_dict can only be called when parameters "
604
+ "are flattened and sharded."
605
+ )
606
+ fqn_to_param_ext = dict(
607
+ zip(handle.flat_param._fqns, handle.flat_param._param_extensions)
608
+ )
609
+
610
+ for fqn, _, _ in _param_name_infos(module, fsdp_state):
611
+ if not _is_composable(fsdp_state):
612
+ fqn_from_global_root = f"{prefix}{FSDP_PREFIX}{fqn}"
613
+ else:
614
+ fqn_from_global_root = f"{prefix}{fqn}"
615
+ try:
616
+ param = state_dict.pop(fqn_from_global_root)
617
+ except KeyError:
618
+ logger.warning(
619
+ f"Did not find param with FQN {fqn_from_global_root}, skipping it. " # noqa: G004
620
+ "The weight will not be filled if you expect it to be."
621
+ )
622
+ continue # TODO: Improve unittesting for state_dict finetuning
623
+ # cases: https://github.com/pytorch/pytorch/issues/109134
624
+
625
+ if not fsdp_state._state_dict_config._use_dtensor:
626
+ # All-gather the param (ShardedTensor)
627
+ param, shards = _ext_pre_load_state_dict_transform(
628
+ param, fsdp_state._fsdp_extension
629
+ )
630
+
631
+ assert len(shards) < 2, (
632
+ "Expects 0 or 1 shard per rank "
633
+ f"but got {len(shards)} shards on rank {fsdp_state.rank}."
634
+ )
635
+ param_numel = param.size().numel()
636
+ dim_0_size = param.size()[0]
637
+ chunk_size = (
638
+ math.ceil(dim_0_size / fsdp_state.world_size)
639
+ * param_numel
640
+ // dim_0_size
641
+ )
642
+ if len(shards) == 1:
643
+ local_tensor = shards[0].tensor.flatten()
644
+ with SimpleProfiler.profile(SimpleProfiler.Type.H2D):
645
+ local_tensor = local_tensor.to(fsdp_state.compute_device)
646
+ num_padding = chunk_size - local_tensor.numel()
647
+ if num_padding > 0:
648
+ local_tensor = F.pad(local_tensor, [0, num_padding])
649
+ else:
650
+ local_tensor = torch.zeros(
651
+ chunk_size, dtype=param.dtype, device=fsdp_state.compute_device
652
+ )
653
+ tensor = torch.empty(
654
+ chunk_size * fsdp_state.world_size,
655
+ dtype=local_tensor.dtype,
656
+ device=fsdp_state.compute_device,
657
+ )
658
+ with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
659
+ dist.all_gather_into_tensor(
660
+ tensor, local_tensor, group=fsdp_state.process_group
661
+ )
662
+ tensor = tensor.narrow(0, 0, param_numel).reshape(param.size())
663
+ state_dict[fqn_from_global_root] = tensor
664
+ else:
665
+ if param.device != fsdp_state._device_mesh.device_type:
666
+ param = param.to(fsdp_state._device_mesh.device_type)
667
+
668
+ root_mesh = _mesh_resources.get_root_mesh(fsdp_state._device_mesh)
669
+ local_tensor = _ext_all_gather_dtensor(
670
+ param, root_mesh, fsdp_state._fsdp_extension
671
+ )
672
+
673
+ if fqn_to_param_ext.get(fqn) is not None:
674
+ ext = fqn_to_param_ext[fqn]
675
+ local_tensor = _ext_post_unflatten_transform(
676
+ local_tensor, ext, fsdp_state._fsdp_extension
677
+ )
678
+ state_dict[fqn_from_global_root] = local_tensor
679
+
680
+ with SimpleProfiler.profile("_enter_unshard_params_ctx"):
681
+ _enter_unshard_params_ctx(module, fsdp_state, writeback=True)
682
+
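+ # Worked example for the chunk-size math above (illustrative numbers): for a
+ # parameter of shape (10, 3) on 4 ranks, dim_0_size = 10 and param_numel = 30,
+ # so chunk_size = ceil(10 / 4) * 30 // 10 = 3 * 3 = 9. The all-gathered buffer
+ # of 9 * 4 = 36 elements is then narrowed to the first 30 and reshaped.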
683
+
684
+ @contextlib.contextmanager
685
+ def _replace_with_full_state_dict_type(fsdp_state: _FSDPState) -> Generator:
686
+ old_state_dict_config = fsdp_state._state_dict_config
687
+ old_state_dict_type = fsdp_state._state_dict_type
688
+ fsdp_state._state_dict_config = FullStateDictConfig()
689
+ fsdp_state._state_dict_type = StateDictType.FULL_STATE_DICT
690
+ yield
691
+ fsdp_state._state_dict_config = old_state_dict_config
692
+ fsdp_state._state_dict_type = old_state_dict_type
693
+
694
+
695
+ @no_type_check
696
+ @torch.no_grad()
697
+ def _post_state_dict_hook(
698
+ module: nn.Module,
699
+ state_dict: Dict[str, Any],
700
+ prefix: str,
701
+ *args: Any,
702
+ ) -> Dict[str, Any]:
703
+ """
704
+ _post_state_dict_hook() is called after the state_dict() of this
705
+ FSDP module is executed. ``fsdp_state._state_dict_type`` is used to decide
706
+ what postprocessing will be done.
707
+ """
708
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
709
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
710
+ context = _replace_with_full_state_dict_type(fsdp_state)
711
+ warnings.warn(
712
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will "
713
+ "be returned."
714
+ )
715
+ else:
716
+ context = contextlib.nullcontext()
717
+
718
+ with context:
719
+ _post_state_dict_hook_fn = {
720
+ StateDictType.FULL_STATE_DICT: _full_post_state_dict_hook,
721
+ StateDictType.LOCAL_STATE_DICT: _local_post_state_dict_hook,
722
+ StateDictType.SHARDED_STATE_DICT: _sharded_post_state_dict_hook,
723
+ }
724
+ processed_state_dict = _post_state_dict_hook_fn[fsdp_state._state_dict_type](
725
+ module, fsdp_state, state_dict, prefix
726
+ )
727
+
728
+ if fsdp_state._is_root:
729
+ logger.info("FSDP finished processing state_dict(), prefix=%s", prefix)
730
+ for key, tensor in sorted(processed_state_dict.items()):
731
+ if key.startswith(prefix) and isinstance(tensor, torch.Tensor):
732
+ local_shape = tensor.shape
733
+ if isinstance(tensor, ShardedTensor):
734
+ local_shape = None
735
+ shards = tensor.local_shards()
736
+ if shards:
737
+ local_shape = shards[0].tensor.shape
738
+ elif isinstance(tensor, DTensor):
739
+ local_shape = tensor.to_local().shape
740
+ logger.info(
741
+ "FQN=%s: type=%s, shape=%s, local_shape=%s, dtype=%s, device=%s",
742
+ key,
743
+ type(tensor),
744
+ tensor.shape,
745
+ local_shape,
746
+ tensor.dtype,
747
+ tensor.device,
748
+ )
749
+
750
+ return processed_state_dict
751
+
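+ # Sketch of how these hooks are exercised from user code (illustrative only;
+ # assumes ``model`` is wrapped with FullyShardedDataParallel, imported as FSDP):
+ #
+ #   with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
+ #       sharded_sd = model.state_dict()  # dispatches to the sharded hooks above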
752
+
753
+ @no_type_check
754
+ @torch.no_grad()
755
+ def _pre_state_dict_hook(
756
+ module: nn.Module,
757
+ *args,
758
+ **kwargs,
759
+ ) -> None:
760
+ """
761
+ This is called before the core state dict saving logic of ``module``.
762
+ ``fsdp_state._state_dict_type`` is used to decide what postprocessing will
763
+ be done.
764
+ """
765
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
766
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
767
+ context = _replace_with_full_state_dict_type(fsdp_state)
768
+ warnings.warn(
769
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will "
770
+ "be returned."
771
+ )
772
+ else:
773
+ _set_use_dtensor(fsdp_state)
774
+ context = contextlib.nullcontext()
775
+
776
+ with context:
777
+ _pre_state_dict_hook_fn = {
778
+ StateDictType.FULL_STATE_DICT: _full_pre_state_dict_hook,
779
+ StateDictType.LOCAL_STATE_DICT: _local_pre_state_dict_hook,
780
+ StateDictType.SHARDED_STATE_DICT: _sharded_pre_state_dict_hook,
781
+ }
782
+ _pre_state_dict_hook_fn[fsdp_state._state_dict_type](
783
+ fsdp_state,
784
+ module,
785
+ *args,
786
+ **kwargs,
787
+ )
788
+
789
+
790
+ @no_type_check
791
+ def _set_use_dtensor(fsdp_state: _FSDPState) -> None:
792
+ # If device_mesh is passed in when initializing FSDP, we automatically set the
793
+ # _use_dtensor flag to True for ShardedStateDictConfig().
794
+ if getattr(fsdp_state, "_device_mesh", None):
795
+ state_dict_type = fsdp_state._state_dict_type
796
+ if state_dict_type == StateDictType.LOCAL_STATE_DICT:
797
+ raise RuntimeError(
798
+ "Found state_dict_type LOCAL_STATE_DICT. "
799
+ "DeviceMesh is not compatible with LOCAL_STATE_DICT. "
800
+ "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict."
801
+ )
802
+ else:
803
+ fsdp_state._state_dict_config._use_dtensor = True
804
+
805
+
806
+ @no_type_check
807
+ @torch.no_grad()
808
+ def _pre_load_state_dict_hook(
809
+ module: nn.Module,
810
+ state_dict: Dict[str, Any],
811
+ prefix: str,
812
+ *args: Any,
813
+ ) -> None:
814
+ """
815
+ This is called before ``module._load_from_state_dict()``.
816
+ ``fsdp_state._state_dict_type`` is used to decide what preprocessing will
817
+ be done.
818
+ """
819
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
820
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
821
+ context = _replace_with_full_state_dict_type(fsdp_state)
822
+ warnings.warn(
823
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will "
824
+ "be returned."
825
+ )
826
+ else:
827
+ _set_use_dtensor(fsdp_state)
828
+ context = contextlib.nullcontext()
829
+
830
+ _lazy_init(fsdp_state, module)
831
+ if fsdp_state._is_root:
832
+ SimpleProfiler.reset()
833
+
834
+ with context:
835
+ _pre_load_state_dict_hook_fn = {
836
+ StateDictType.FULL_STATE_DICT: _full_pre_load_state_dict_hook,
837
+ StateDictType.LOCAL_STATE_DICT: _local_pre_load_state_dict_hook,
838
+ StateDictType.SHARDED_STATE_DICT: _sharded_pre_load_state_dict_hook,
839
+ }
840
+ # Code that is common for all state_dict impls
841
+ if fsdp_state._device_handle.is_available():
842
+ fsdp_state._device_handle.synchronize()
843
+ # Dispatch into state_dict specific implementation of pre-hook.
844
+ _pre_load_state_dict_hook_fn[fsdp_state._state_dict_type](
845
+ module, fsdp_state, state_dict, prefix
846
+ )
847
+
848
+
849
+ @no_type_check
850
+ @torch.no_grad()
851
+ def _post_load_state_dict_hook(
852
+ module: nn.Module,
853
+ incompatible_keys: Tuple[List[str], List[str]],
854
+ *args: Any,
855
+ ) -> None:
856
+ fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
857
+ if fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD:
858
+ context = _replace_with_full_state_dict_type(fsdp_state)
859
+ warnings.warn(
860
+ "When using ``NO_SHARD`` for ``ShardingStrategy``, full_state_dict will "
861
+ "be returned."
862
+ )
863
+ else:
864
+ context = contextlib.nullcontext()
865
+
866
+ with context:
867
+ _post_load_state_dict_hook_fn = {
868
+ StateDictType.FULL_STATE_DICT: _full_post_load_state_dict_hook,
869
+ StateDictType.LOCAL_STATE_DICT: _local_post_load_state_dict_hook,
870
+ StateDictType.SHARDED_STATE_DICT: _sharded_post_load_state_dict_hook,
871
+ }
872
+ # Code that is common for all state_dict impls
873
+ # Dispatch into state_dict type specific implementation of post-hook for
874
+ # loading state_dict.
875
+ _post_load_state_dict_hook_fn[fsdp_state._state_dict_type](module, fsdp_state)
876
+
877
+ # When reporting incompatible keys, trim FSDP prefixes.
878
+ missing_keys = incompatible_keys[0]
879
+ unexpected_keys = incompatible_keys[1]
880
+ for i in range(len(missing_keys)):
881
+ missing_keys[i] = clean_tensor_name(missing_keys[i])
882
+
883
+ for i in range(len(unexpected_keys)):
884
+ unexpected_keys[i] = clean_tensor_name(unexpected_keys[i])
885
+
886
+ if fsdp_state._is_root:
887
+ SimpleProfiler.dump_and_reset("FSDP model load_state_dict profiling: ")
888
+
889
+
890
+ def _register_all_state_dict_hooks(state: _FSDPState):
891
+ """
892
+ Registers pre-save, post-save, pre-load, and post-load state dict hooks.
893
+ """
894
+ for hook_registration_fn_str, hook, hook_registration_fn_kwargs in (
895
+ ("register_state_dict_pre_hook", _pre_state_dict_hook, {}),
896
+ ("_register_state_dict_hook", _post_state_dict_hook, {}),
897
+ (
898
+ "_register_load_state_dict_pre_hook",
899
+ _pre_load_state_dict_hook,
900
+ {"with_module": True},
901
+ ),
902
+ ("register_load_state_dict_post_hook", _post_load_state_dict_hook, {}),
903
+ ):
904
+ _register_state_dict_hooks_base(
905
+ state, hook_registration_fn_str, hook, hook_registration_fn_kwargs
906
+ )
907
+
908
+
909
+ @no_type_check
910
+ def _register_state_dict_hooks_base(
911
+ state: _FSDPState,
912
+ hook_registration_fn_name: str,
913
+ hook: Callable,
914
+ hook_registration_fn_kwargs: Dict[str, Any],
915
+ ) -> None:
916
+ """Registers ``hook`` using ``hook_registration_fn``."""
917
+ if not _is_composable(state):
918
+ getattr(state, hook_registration_fn_name)(hook, **hook_registration_fn_kwargs)
919
+ else:
920
+ handle = state._handle
921
+ if handle:
922
+ getattr(handle._fully_sharded_module, hook_registration_fn_name)(
923
+ hook, **hook_registration_fn_kwargs
924
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_trace_utils.py ADDED
@@ -0,0 +1,238 @@
1
+ # mypy: allow-untyped-defs
2
+ import functools
3
+ from contextlib import contextmanager
4
+ from dataclasses import dataclass, field
5
+ from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+
11
+ @dataclass
12
+ class TracingConfig:
13
+ """
14
+ This represents a symbolic tracing configuration.
15
+
16
+ Args:
17
+ tracer (torch.fx.Tracer): An instance of :class:`torch.fx.Tracer` to
18
+ use for symbolic tracing. The default value is the native
19
+ :class:`torch.fx.Tracer` constructed with default arguments.
20
+ However, the user may want to pass a different value such as the
21
+ ``HFTracer`` for models in the HuggingFace Transformers_ library.
22
+ .. _Transformers: https://huggingface.co/docs/transformers/index
23
+ concrete_args (Optional[Dict[str, Any]]): Concrete arguments that
24
+ should not be treated as ``torch.fx.Proxy`` when tracing the
25
+ module ``forward()``. Passing ``concrete_args`` allows partially
26
+ specializing the forward, e.g. to remove control flow or data
27
+ structures. This ``concrete_args`` here is the same argument used
28
+ in :meth:`~torch.fx.Tracer.trace`.
29
+ """
30
+
31
+ tracer: torch.fx.Tracer = field(default_factory=torch.fx.Tracer)
32
+ concrete_args: Optional[Dict[str, Any]] = None
33
+
34
+
35
+ class _ParamUsageInfo(NamedTuple):
36
+ """
37
+ This is used for ``_ExecutionInfo.module_to_param_usage_infos`` to record
38
+ execution information. The ``dict`` maps modules to a list of these
39
+ ``_ParamUsageInfo`` instances, where each instance represents a group of
40
+ parameters used together.
41
+
42
+ Specifically, for each module key in the ``dict``, each instance of this
43
+ class represents either:
44
+ (1) the module and some sublist of its ``named_parameters()`` used
45
+ together in execution (see ``_patched_create_proxy()``), or
46
+ (2) a submodule and all of ``submodule.named_parameters()`` (see
47
+ ``_patched_call_module()``).
48
+
49
+ Type (1) corresponds to directly using parameters in ops without calling
50
+ ``forward()``, and type (2) corresponds to calling ``forward()``. The
51
+ mapped-to lists in the ``dict`` follow the execution order.
52
+ """
53
+
54
+ module: nn.Module
55
+ named_params: List[Tuple[str, nn.Parameter]]
56
+
57
+
58
+ class _ExecutionInfo:
59
+ """
60
+ This represents the execution order information from the forward pass.
61
+
62
+ Attributes:
63
+ curr_module (nn.Module): Current module being traced.
64
+ module_forward_order (List[nn.Module]): The modules in (pre-)forward
65
+ order, i.e. the order in which their ``forward()`` methods are
66
+ called. Each call to a module's ``forward()`` corresponds to one
67
+ element in the list.
68
+ module_to_param_usage_infos (Dict[nn.Module, List[_ParamUsageInfo]]):
69
+ Maps a module to a list of module execution infos. See
70
+ :class:`_ParamUsageInfo` for details.
71
+ param_forward_order (List[nn.Parameter]): The parameters in forward
72
+ execution order, where only a parameter's first participation is
73
+ included.
74
+ visited_params (Set[nn.Parameter]): The parameters visited so far
75
+ during the trace. This is only used during tracing for fast
76
+ membership check. Invariant: The parameters in
77
+ ``param_forward_order`` are exactly those in ``visited_params``.
78
+ """
79
+
80
+ def __init__(self, root_module: nn.Module) -> None:
81
+ self.curr_module: nn.Module = root_module
82
+ self.module_forward_order: List[nn.Module] = [root_module]
83
+ self.module_to_param_usage_infos: Dict[nn.Module, List[_ParamUsageInfo]] = {
84
+ root_module: []
85
+ }
86
+ self.param_forward_order: List[nn.Parameter] = []
87
+ self.visited_params: Set[nn.Parameter] = set()
88
+
89
+
90
+ class _ExecOrderTracer:
91
+ def __init__(self) -> None:
92
+ self.exec_info: Optional[_ExecutionInfo] = None
93
+
94
+ @contextmanager
95
+ def patch_tracer(self, tracer: torch.fx.Tracer, root_module: nn.Module):
96
+ self.exec_info = _ExecutionInfo(root_module)
97
+ orig_call_module = tracer.call_module
98
+ orig_create_proxy = tracer.create_proxy
99
+ tracer.call_module = functools.partial( # type: ignore[method-assign]
100
+ self._patched_call_module, orig_call_module, self.exec_info
101
+ )
102
+ fqn_to_param = dict(root_module.named_parameters())
103
+ tracer.create_proxy = functools.partial( # type: ignore[method-assign]
104
+ self._patched_create_proxy,
105
+ orig_create_proxy,
106
+ self.exec_info,
107
+ fqn_to_param,
108
+ )
109
+ try:
110
+ yield
111
+ finally:
112
+ tracer.call_module = orig_call_module # type: ignore[method-assign]
113
+ tracer.create_proxy = orig_create_proxy # type: ignore[method-assign]
114
+
115
+ def _patched_call_module(
116
+ self,
117
+ call_module: Callable,
118
+ exec_info: _ExecutionInfo,
119
+ # Below are the expected arguments to `call_module()`
120
+ module: nn.Module,
121
+ forward: Callable,
122
+ args: Tuple[Any, ...],
123
+ kwargs: Dict[str, Any],
124
+ ) -> Any:
125
+ """
126
+ Overrides ``call_module`` to save execution information to
127
+ ``exec_info``. Note that ``call_module`` is called during symbolic
128
+ tracing for each non-root module.
129
+
130
+ Args:
131
+ call_module (Callable): Original ``call_module`` to override.
132
+ exec_info (_ExecutionInfo): Used to record execution information.
133
+ module (nn.Module): Module corresponding to this ``call_module``.
134
+ forward (Callable): ``forward()`` method of ``module`` to be called
135
+ for this ``call_module``.
136
+ args (Tuple[Any, ...]): Positional arguments for ``forward``.
137
+ kwargs (Dict[str, Any]): Keyword arguments for ``forward``.
138
+
139
+ Returns:
140
+ Same return value as ``call_module``.
141
+ """
142
+ exec_info.module_forward_order.append(module)
143
+ named_params = list(module.named_parameters())
144
+ curr_module = exec_info.curr_module
145
+ if named_params:
146
+ assert (
147
+ curr_module in exec_info.module_to_param_usage_infos
148
+ ), "The current module should have already been processed by a patched `call_module`"
149
+ exec_info.module_to_param_usage_infos[exec_info.curr_module].append(
150
+ _ParamUsageInfo(module, named_params)
151
+ )
152
+ prev_curr_module = curr_module
153
+ exec_info.curr_module = module
154
+ exec_info.module_to_param_usage_infos[module] = []
155
+ output = call_module(module, forward, args, kwargs)
156
+ exec_info.curr_module = prev_curr_module
157
+ return output
158
+
159
+ def _patched_create_proxy(
160
+ self,
161
+ create_proxy: Callable,
162
+ exec_info: _ExecutionInfo,
163
+ fqn_to_param: Dict[str, nn.Parameter],
164
+ # Below are the expected arguments to `create_proxy()`
165
+ kind: str,
166
+ target: torch.fx.node.Target,
167
+ args: Tuple[Any, ...],
168
+ kwargs: Dict[str, Any],
169
+ name: Optional[str] = None,
170
+ type_expr: Optional[Any] = None,
171
+ proxy_factory_fn: Optional[Callable[[torch.fx.Node], torch.fx.Proxy]] = None,
172
+ ) -> torch.fx.Proxy:
173
+ """
174
+ Overrides ``create_proxy`` to save execution information to
175
+ ``exec_info``. Note that ``create_proxy`` is called during symbolic
176
+ tracing for each leaf function/method/module.
177
+
178
+ Args:
179
+ create_proxy (Callable): Original ``create_proxy`` to override.
180
+ exec_info (_ExecutionInfo): Used to record execution information.
181
+ fqn_to_param (Dict[str, nn.Parameter]): ``dict`` version of the
182
+ root module's ``named_parameters()`` with FQN as key and
183
+ parameter as value.
184
+ kind (str): Kind of the target method ('call_function',
185
+ 'call_method', 'get_attr', 'call_module', 'placeholder', or
186
+ 'output'). See :class:`torch.fx.Graph` for details. This is
187
+ passed to ``create_proxy``.
188
+ target (torch.fx.node.Target): Contains the string name of the
189
+ function/method/module. This is passed to ``create_proxy``.
190
+ args (Tuple[Any, ...]): Positional arguments for the function/
191
+ method/module. This is passed to ``create_proxy``.
192
+ kwargs (Dict[str, Any]): Keyword arguments for the function/method/
193
+ module. This is passed to ``create_proxy``
194
+ name (Optional[str]): An optional string name for the ``Node``
195
+ created in ``create_proxy``. This is passed to
196
+ ``create_proxy``.
197
+ type_expr (Optional[Any]): An optional type annotation representing
198
+ the Python type that the output of the node has. This is passed
199
+ to ``create_proxy``.
200
+ proxy_factory_fn (Callable[[torch.fx.Node], torch.fx.Proxy]):
201
+ An alternative proxy constructor used in ``create_proxy``. This
202
+ is passed to ``create_proxy``.
203
+
204
+ Returns:
205
+ torch.fx.Proxy: Created ``Node`` wrapped in a ``Proxy`` object.
206
+ """
207
+ proxy = create_proxy(
208
+ kind, target, args, kwargs, name, type_expr, proxy_factory_fn
209
+ )
210
+ curr_module = exec_info.curr_module
211
+ if kind in ("call_function", "call_method"):
212
+ if args is not None:
213
+ named_params: List[Tuple[str, nn.Parameter]] = []
214
+ for arg in args:
215
+ if (
216
+ isinstance(arg, torch.fx.Proxy)
217
+ and arg.node.target in fqn_to_param
218
+ ):
219
+ param = fqn_to_param[arg.node.target] # type: ignore[index]
220
+ named_params.append((arg.node.target, param)) # type: ignore[arg-type]
221
+ if param not in exec_info.visited_params:
222
+ exec_info.visited_params.add(param)
223
+ exec_info.param_forward_order.append(param)
224
+ if named_params:
225
+ exec_info.module_to_param_usage_infos[curr_module].append(
226
+ _ParamUsageInfo(curr_module, named_params)
227
+ )
228
+ elif kind == "call_module":
229
+ named_params = list(curr_module.named_parameters())
230
+ if named_params:
231
+ exec_info.module_to_param_usage_infos[curr_module].append(
232
+ _ParamUsageInfo(curr_module, named_params)
233
+ )
234
+ for _, param in named_params:
235
+ if param not in exec_info.visited_params:
236
+ exec_info.visited_params.add(param)
237
+ exec_info.param_forward_order.append(param)
238
+ return proxy
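+ # Illustrative usage sketch (``model`` is assumed to be any ``nn.Module``):
+ #
+ #   tracer = torch.fx.Tracer()
+ #   exec_order_tracer = _ExecOrderTracer()
+ #   with exec_order_tracer.patch_tracer(tracer, model):
+ #       tracer.trace(model)
+ #   exec_info = exec_order_tracer.exec_info  # module/parameter forward order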
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_traversal_utils.py ADDED
@@ -0,0 +1,113 @@
1
+ """
2
+ NOTE: This file must be imported like
3
+ ``import torch.distributed.fsdp._traversal_utils`` and not like
4
+ ``from torch.distirbuted.fsdp._traversal_utils import ...`` to avoid circular
5
+ imports. For brevity, we may import the file as ``traversal_utils``.
6
+ """
7
+
8
+ import collections
9
+ from typing import Deque, List, Set, Tuple
10
+
11
+ import torch.nn as nn
12
+ from torch.distributed._composable.contract import _get_registry
13
+ from torch.distributed.fsdp._common_utils import _FSDPState, _get_module_fsdp_state
14
+
15
+
16
+ """
17
+ [Note: FSDP State Traversal]
18
+ For the wrapper code path, ``_FSDPState`` is the ``FullyShardedDataParallel``
19
+ module wrapping a fully sharded module, and for the non-wrapper code path,
20
+ ``_FSDPState`` is an object that gets embedded on a fully sharded module.
21
+ See [Note: Fully Sharded Module] for the definition.
22
+
23
+ There are three common traversal idioms: Given a root module,
24
+ - ``_get_fsdp_states()`` returns all ``_FSDPState`` s in the tree.
25
+ - ``get_fsdp_root_states()`` returns all local root ``_FSDPState`` s in the
26
+ tree (i.e. those with ``_is_root == True``).
27
+ - ``_get_fsdp_handles()``returns all ``FlatParamHandle`` s in the tree.
28
+
29
+ All of these methods must take in the root module (i.e. an ``nn.Module``) and
30
+ not a general ``_FSDPState`` because ``_FSDPState`` does not support a graph
31
+ traversal, whereas ``nn.Module`` has ``nn.Module.modules()`` for traversal.
32
+ """
33
+
34
+
35
+ def _composable(module: nn.Module) -> bool:
36
+ """
37
+ Returns if ``module`` can compose with ``fully_shard``.
38
+ """
39
+ # TODO: Add any other composable APIs that are mutually exclusive.
40
+ registry = _get_registry(module)
41
+ if registry is None:
42
+ return True
43
+ return "replicate" not in registry
44
+
45
+
46
+ # TODO (awgu): We may be able to remove this function if we retired the
47
+ # `use_orig_params=False` code path since so far we only need the module for
48
+ # `FlatParameter` registration, which is not needed for `use_orig_params=True`.
49
+ def _get_fsdp_states_with_modules(
50
+ module: nn.Module,
51
+ ) -> Tuple[List[_FSDPState], List[nn.Module]]:
52
+ """
53
+ Returns a tuple containing:
54
+ 1. A list of the ``_FSDPState`` instances in the module tree rooted at
55
+ ``module`` without any duplicates and following the ``module.modules()``
56
+ traversal order (which is assumed to be depth-first).
57
+ 2. A corresponding list of the modules owning the states in the first list.
58
+
59
+ For the wrapper code path, both returned lists are the same, each
60
+ containing all ``FullyShardedDataParallel`` instances. For the composable
61
+ code path, this returns a list of all composable state instances and a list
62
+ of the corresponding fully sharded modules. See [Note: Fully Sharded
63
+ Module].
64
+
65
+ NOTE: The traversal does not proceed into any module annotated by an
66
+ incompatible API (e.g. ``replicate``).
67
+ """
68
+ fsdp_states: List[_FSDPState] = []
69
+ fsdp_modules: List[nn.Module] = []
70
+ # Track the visited FSDP states since multiple modules may share the same
71
+ # one and we want to return a de-duplicated list
72
+ visited_fsdp_states: Set[_FSDPState] = set()
73
+ # Track the visited modules in case of shared modules, which implies the
74
+ # module graph is no longer a tree
75
+ visited_modules: Set[nn.Module] = set()
76
+
77
+ # Perform depth-first search from `module` to ensure that we do not
78
+ # traverse into an incompatible API's subtree (use DFS instead of BFS to
79
+ # match `.modules()` order)
80
+ deque: Deque[nn.Module] = collections.deque([module])
81
+ while deque:
82
+ submodule = deque.popleft()
83
+ visited_modules.add(submodule)
84
+ if not _composable(submodule):
85
+ continue
86
+ for child_module in reversed(list(submodule.children())):
87
+ if child_module not in visited_modules:
88
+ deque.appendleft(child_module)
89
+ optional_state = _get_module_fsdp_state(submodule)
90
+ if optional_state is not None and optional_state not in visited_fsdp_states:
91
+ visited_fsdp_states.add(optional_state)
92
+ fsdp_states.append(optional_state)
93
+ fsdp_modules.append(submodule)
94
+ return fsdp_states, fsdp_modules
95
+
96
+
97
+ def _get_fsdp_states(module: nn.Module) -> List[_FSDPState]:
98
+ """See :func:`_get_fsdp_states_with_modules`."""
99
+ fsdp_states, _ = _get_fsdp_states_with_modules(module)
100
+ return fsdp_states
101
+
102
+
103
+ def _get_fsdp_handles(module: nn.Module) -> List:
104
+ """
105
+ Returns all ``FlatParamHandle`` s in the module tree rooted at ``module``
106
+ following the rules in :func:`_get_fsdp_state`.
107
+ """
108
+ handles = [
109
+ fsdp_state._handle
110
+ for fsdp_state in _get_fsdp_states(module)
111
+ if fsdp_state._handle is not None
112
+ ]
113
+ return handles
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/_unshard_param_utils.py ADDED
@@ -0,0 +1,336 @@
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import warnings
4
+ from typing import cast, Generator
5
+
6
+ import torch
7
+ import torch.distributed.fsdp._traversal_utils as traversal_utils
8
+ import torch.nn as nn
9
+ from torch.distributed.fsdp._common_utils import (
10
+ _FSDPState,
11
+ _get_module_fsdp_state,
12
+ _has_fsdp_params,
13
+ _module_handle,
14
+ HandleTrainingState,
15
+ TrainingState,
16
+ )
17
+ from torch.distributed.fsdp._runtime_utils import (
18
+ _lazy_init,
19
+ _reset_flat_param_grad_info_if_needed,
20
+ _reshard,
21
+ _reshard_grads,
22
+ _unshard,
23
+ _unshard_grads,
24
+ )
25
+ from torch.distributed.utils import _p_assert
26
+
27
+ from ._flat_param import FlatParamHandle
28
+
29
+
30
+ FLAT_PARAM = "_flat_param"
31
+
32
+
33
+ @torch.no_grad()
34
+ def _writeback_to_local_shard(
35
+ handle: FlatParamHandle,
36
+ writeback_grad: bool,
37
+ ):
38
+ """
39
+ For the handle, writes back this rank's shard of the unsharded
40
+ flattened parameter to the sharded flattened parameter. If
41
+ ``writeback_grad=True``, then writes back to the sharded gradient as
42
+ well.
43
+
44
+ Precondition: The handle's ``FlatParameter`` 's data points to the
45
+ padded unsharded flattened parameter.
46
+ """
47
+
48
+ def _get_shard(flat_param_or_grad: torch.Tensor) -> torch.Tensor:
49
+ if handle.uses_sharded_strategy:
50
+ # For sharded strategies, get the *unpadded* shard instead of
51
+ # the *padded* shard to persist user changes to the padding
52
+ # (though FSDP does not explicitly support this)
53
+ shard, _ = FlatParamHandle._get_unpadded_shard(
54
+ flat_param_or_grad,
55
+ handle.rank,
56
+ handle.world_size,
57
+ )
58
+ return shard
59
+ # For `NO_SHARD`, the `flat_param` or its gradient may be modified,
60
+ # so we write it back directly
61
+ return flat_param_or_grad
62
+
63
+ param_shard = _get_shard(handle.flat_param)
64
+ handle.flat_param._local_shard[: param_shard.numel()].copy_(param_shard) # type: ignore[attr-defined]
65
+ if writeback_grad:
66
+ existing_grad = handle.sharded_grad
67
+ if existing_grad is not None:
68
+ assert handle.flat_param.grad is not None
69
+ grad_shard = _get_shard(handle.flat_param.grad)
70
+ existing_grad[: grad_shard.numel()].copy_(grad_shard)
71
+
72
+
73
+ def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None:
74
+ """
75
+ De-registers the flattened parameter from the wrapped module, hiding it
76
+ from ``nn.Module`` methods.
77
+
78
+ We do not use ``del`` because we want ``FLAT_PARAM`` to always be an
79
+ attribute but dynamically change whether it is visible to ``nn.Module``
80
+ methods.
81
+ """
82
+ if _has_fsdp_params(state, module):
83
+ # TODO: figure out the case for the composable APIs.
84
+ cast(nn.Module, module.module)._parameters.pop(FLAT_PARAM, None)
85
+
86
+
87
+ def _register_flat_param(state: _FSDPState, module: nn.Module) -> None:
88
+ """
89
+ Registers the flattened parameter to the wrapped module, making it
90
+ visible to ``nn.Module`` methods.
91
+
92
+ We do not use :meth:`nn.Module.register_parameter` because we want
93
+ ``FLAT_PARAM`` to always be an attribute but dynamically change whether
94
+ it is visible to ``nn.Module`` methods.
95
+ """
96
+ handle = _module_handle(state, module)
97
+ if _has_fsdp_params(state, module):
98
+ # TODO: figure out the case for the composable APIs.
99
+ cast(nn.Module, module.module)._parameters[FLAT_PARAM] = handle.flat_param
100
+
101
+
102
+ @contextlib.contextmanager
103
+ def _unflatten_as_params(state: _FSDPState, module: nn.Module) -> Generator:
104
+ """
105
+ Assumes that the flattened parameter is unsharded. When in the context,
106
+ de-registers the flattened parameter and unflattens the original
107
+ parameters as ``nn.Parameter`` views into the flattened parameter.
108
+ After the context, re-registers the flattened parameter and restores
109
+ the original parameters as ``Tensor`` views into the flattened
110
+ parameter.
111
+ """
112
+ handle = _module_handle(state, module)
113
+ if not handle:
114
+ yield
115
+ else:
116
+ _deregister_flat_param(state, module)
117
+ try:
118
+ with handle.unflatten_as_params():
119
+ yield
120
+ finally:
121
+ if not handle._use_orig_params:
122
+ _register_flat_param(state, module)
123
+
124
+
125
+ def _validate_unshard_params_args(
126
+ state: _FSDPState,
127
+ writeback: bool,
128
+ rank0_only: bool,
129
+ offload_to_cpu: bool,
130
+ with_grads: bool,
131
+ ) -> None:
132
+ if with_grads and (offload_to_cpu or not state._use_orig_params):
133
+ raise NotImplementedError(
134
+ f"with_grads={with_grads}, "
135
+ f"use_orig_params={state._use_orig_params}, "
136
+ f"offload_to_cpu={offload_to_cpu} "
137
+ f"is not supported yet"
138
+ )
139
+ if offload_to_cpu and state._handle and (not state._handle.uses_sharded_strategy):
140
+ raise NotImplementedError(
141
+ "offload_to_cpu=True and NO_SHARD is not supported yet"
142
+ )
143
+ if writeback and rank0_only:
144
+ # TODO: Rank 0 can broadcast the `FlatParameter` to allow all ranks to
145
+ # persist the changes.
146
+ raise NotImplementedError(
147
+ "writeback=True and rank0_only=True is not supported yet"
148
+ )
149
+ if offload_to_cpu and not rank0_only:
150
+ warnings.warn(
151
+ "offload_to_cpu=True and rank0_only=False may result in the "
152
+ "unsharded parameters being redundantly copied to CPU memory for "
153
+ "GPUs sharing the same CPU memory, which risks CPU OOM. We "
154
+ "recommend using offload_to_cpu=True with rank0_only=True."
155
+ )
156
+
157
+
158
+ @contextlib.contextmanager
159
+ def _unshard_fsdp_state_params(
160
+ module: nn.Module,
161
+ state: _FSDPState,
162
+ writeback: bool,
163
+ rank0_only: bool,
164
+ offload_to_cpu: bool,
165
+ with_grads: bool,
166
+ ):
167
+ """
168
+ This unshards the parameters for a single FSDP state ``state`` that
169
+ corresponds to ``module``.
170
+ """
171
+ _validate_unshard_params_args(
172
+ state, writeback, rank0_only, offload_to_cpu, with_grads
173
+ )
174
+ state._device_handle.synchronize()
175
+ # If handles are shared by other module(s), the handle may be already unsharded.
176
+ maybe_handle = _module_handle(state, module)
177
+ handle = None
178
+ if (
179
+ maybe_handle
180
+ and maybe_handle._training_state != HandleTrainingState.SUMMON_FULL_PARAMS
181
+ ):
182
+ handle = maybe_handle
183
+ if not handle:
184
+ yield
185
+ return
186
+
187
+ assert (
188
+ handle._training_state == HandleTrainingState.IDLE
189
+ ), f"Expects the handle training to be IDLE but got {handle._training_state}"
190
+
191
+ handle._training_state = HandleTrainingState.SUMMON_FULL_PARAMS
192
+
193
+ _reset_flat_param_grad_info_if_needed(handle)
194
+ free_unsharded_flat_param = handle.needs_unshard()
195
+ # No need to call `wait_stream()` since we unshard in the computation
196
+ # stream directly
197
+ computation_stream = state._device_handle.current_stream()
198
+ _unshard(state, handle, computation_stream, computation_stream)
199
+ if with_grads:
200
+ _unshard_grads(handle)
201
+
202
+ if rank0_only and state.rank != 0:
203
+ # Free the unsharded flattened parameter early
204
+ _reshard(state, handle, free_unsharded_flat_param)
205
+ if with_grads:
206
+ _reshard_grads(handle)
207
+ try:
208
+ yield
209
+ finally:
210
+ handle._training_state = HandleTrainingState.IDLE
211
+ else:
212
+ # Unflatten the unsharded flattened parameters
213
+ with contextlib.ExitStack() as stack:
214
+ # Invariant: rank == 0 or !rank0_only
215
+ if offload_to_cpu and handle.uses_sharded_strategy:
216
+ stack.enter_context(handle.to_cpu())
217
+ # NOTE: Since PyTorch enforces that a parameter and its
218
+ # gradients need to match metadata (e.g. device), we must
219
+ # move gradients to CPU *after* we move parameters.
220
+ # NOTE: This assumes 1 `FlatParameter`
221
+ if not state._use_orig_params:
222
+ stack.enter_context(_unflatten_as_params(state, module))
223
+ try:
224
+ yield
225
+ finally:
226
+ stack.close()
227
+ if writeback:
228
+ _writeback_to_local_shard(handle, with_grads)
229
+ _reshard(state, handle, free_unsharded_flat_param)
230
+ if with_grads:
231
+ _reshard_grads(handle)
232
+ handle._training_state = HandleTrainingState.IDLE
233
+
234
+
235
+ @contextlib.contextmanager
236
+ def _unshard_params_for_summon(
237
+ module: nn.Module,
238
+ state: _FSDPState,
239
+ writeback: bool,
240
+ rank0_only: bool,
241
+ offload_to_cpu: bool,
242
+ with_grads: bool,
243
+ ):
244
+ _validate_unshard_params_args(
245
+ state, writeback, rank0_only, offload_to_cpu, with_grads
246
+ )
247
+ _lazy_init(state, module)
248
+ if state.training_state == TrainingState.FORWARD_BACKWARD:
249
+ raise AssertionError(
250
+ "Cannot manually unshard parameters during forward/backward"
251
+ )
252
+ elif state.training_state == TrainingState.SUMMON_FULL_PARAMS:
253
+ raise AssertionError(
254
+ "Cannot manually unshard parameters when already unsharding parameters"
255
+ )
256
+ with _unshard_fsdp_state_params(
257
+ module=module,
258
+ state=state,
259
+ writeback=writeback,
260
+ rank0_only=rank0_only,
261
+ offload_to_cpu=offload_to_cpu,
262
+ with_grads=with_grads,
263
+ ):
264
+ try:
265
+ state.training_state = TrainingState.SUMMON_FULL_PARAMS
266
+ yield
267
+ finally:
268
+ state.training_state = TrainingState.IDLE
269
+
270
+
271
+ @contextlib.contextmanager
272
+ def _unshard_params(
273
+ module: nn.Module,
274
+ recurse: bool,
275
+ writeback: bool,
276
+ rank0_only: bool,
277
+ offload_to_cpu: bool,
278
+ with_grads: bool,
279
+ ):
280
+ """
281
+ This unshards FSDP-managed parameters for all modules with FSDP applied in
282
+ the module tree rooted at ``module``.
283
+ """
284
+ if not recurse:
285
+ optional_state = _get_module_fsdp_state(module)
286
+ if optional_state is None:
287
+ with contextlib.nullcontext():
288
+ yield
289
+ return
290
+ states_and_modules = ([optional_state], [module])
291
+ else:
292
+ states_and_modules = traversal_utils._get_fsdp_states_with_modules(module)
293
+ with contextlib.ExitStack() as stack:
294
+ for state, module in zip(*states_and_modules):
295
+ stack.enter_context(
296
+ _unshard_params_for_summon(
297
+ module=module,
298
+ state=state,
299
+ writeback=writeback,
300
+ rank0_only=rank0_only,
301
+ offload_to_cpu=offload_to_cpu,
302
+ with_grads=with_grads,
303
+ )
304
+ )
305
+ yield
306
+
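+ # These helpers back the public ``FSDP.summon_full_params`` context manager.
+ # Illustrative sketch only (assumes ``model`` is FSDP-wrapped):
+ #
+ #   with FSDP.summon_full_params(model, writeback=False):
+ #       shapes = [p.shape for p in model.parameters()]  # unsharded shapes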
307
+
308
+ def _deregister_orig_params(state: _FSDPState, module: nn.Module) -> None:
309
+ """
310
+ Deregisters the original parameters; registers the ``FlatParameter``.
311
+ """
312
+ handle = _module_handle(state, module)
313
+ if not handle:
314
+ return
315
+ _p_assert(
316
+ handle._use_orig_params,
317
+ f"Inconsistent `_use_orig_params` -- FSDP: {state._use_orig_params} "
318
+ f"handle: {handle._use_orig_params}",
319
+ )
320
+ handle._deregister_orig_params()
321
+ _register_flat_param(state, module)
322
+
323
+
324
+ def _register_orig_params(state: _FSDPState, module: nn.Module) -> None:
325
+ """
326
+ Deregisters the ``FlatParameter``; registers the original parameters.
327
+ """
328
+ handle = _module_handle(state, module)
329
+ if not handle:
330
+ return
331
+ _deregister_flat_param(state, module)
332
+ if handle.is_sharded(handle.flat_param):
333
+ handle._use_sharded_views()
334
+ handle._use_sharded_grad_views()
335
+ else:
336
+ handle._use_unsharded_views(as_params=True)
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/api.py ADDED
@@ -0,0 +1,410 @@
1
+ """
2
+ This file includes public APIs for FSDP such as the classes used for the
3
+ constructor arguments.
4
+ """
5
+
6
+ from dataclasses import dataclass
7
+ from enum import auto, Enum
8
+ from typing import Optional, Sequence, Type
9
+
10
+ import torch
11
+ from torch.nn.modules.batchnorm import _BatchNorm
12
+
13
+
14
+ __all__ = [
15
+ "ShardingStrategy",
16
+ "BackwardPrefetch",
17
+ "MixedPrecision",
18
+ "CPUOffload",
19
+ "StateDictType",
20
+ "StateDictConfig",
21
+ "FullStateDictConfig",
22
+ "LocalStateDictConfig",
23
+ "ShardedStateDictConfig",
24
+ "OptimStateDictConfig",
25
+ "FullOptimStateDictConfig",
26
+ "LocalOptimStateDictConfig",
27
+ "ShardedOptimStateDictConfig",
28
+ "StateDictSettings",
29
+ ]
30
+
31
+
32
+ class ShardingStrategy(Enum):
33
+ """
34
+ This specifies the sharding strategy to be used for distributed training by
35
+ :class:`FullyShardedDataParallel`.
36
+
37
+ - ``FULL_SHARD``: Parameters, gradients, and optimizer states are sharded.
38
+ For the parameters, this strategy unshards (via all-gather) before the
39
+ forward, reshards after the forward, unshards before the backward
40
+ computation, and reshards after the backward computation. For gradients,
41
+ it synchronizes and shards them (via reduce-scatter) after the backward
42
+ computation. The sharded optimizer states are updated locally per rank.
43
+ - ``SHARD_GRAD_OP``: Gradients and optimizer states are sharded during
44
+ computation, and additionally, parameters are sharded outside
45
+ computation. For the parameters, this strategy unshards before the
46
+ forward, does not reshard them after the forward, and only reshards them
47
+ after the backward computation. The sharded optimizer states are updated
48
+ locally per rank. Inside ``no_sync()``, the parameters are not resharded
49
+ after the backward computation.
50
+ - ``NO_SHARD``: Parameters, gradients, and optimizer states are not sharded
51
+ but instead replicated across ranks similar to PyTorch's
52
+ :class:`DistributedDataParallel` API. For gradients, this strategy
53
+ synchronizes them (via all-reduce) after the backward computation. The
54
+ unsharded optimizer states are updated locally per rank.
55
+ - ``HYBRID_SHARD``: Apply ``FULL_SHARD`` within a node, and replicate parameters across
56
+ nodes. This results in reduced communication volume as expensive all-gathers and
57
+ reduce-scatters are only done within a node, which can be more performant for medium
58
+ -sized models.
59
+ - ``_HYBRID_SHARD_ZERO2``: Apply ``SHARD_GRAD_OP`` within a node, and replicate parameters across
60
+ nodes. This is like ``HYBRID_SHARD``, except this may provide even higher throughput
61
+ since the unsharded parameters are not freed after the forward pass, saving the
62
+ all-gathers in the pre-backward.
63
+ """
64
+
65
+ FULL_SHARD = auto()
66
+ SHARD_GRAD_OP = auto()
67
+ NO_SHARD = auto()
68
+ HYBRID_SHARD = auto()
69
+ _HYBRID_SHARD_ZERO2 = auto()
70
+
71
+
72
+ class BackwardPrefetch(Enum):
73
+ """
74
+ This configures explicit backward prefetching, which improves throughput by
75
+ enabling communication and computation overlap in the backward pass at the
76
+ cost of slightly increased memory usage.
77
+
78
+ - ``BACKWARD_PRE``: This enables the most overlap but increases memory
79
+ usage the most. This prefetches the next set of parameters *before* the
80
+ current set of parameters' gradient computation. This overlaps the *next
81
+ all-gather* and the *current gradient computation*, and at the peak, it
82
+ holds the current set of parameters, next set of parameters, and current
83
+ set of gradients in memory.
84
+ - ``BACKWARD_POST``: This enables less overlap but requires less memory
85
+ usage. This prefetches the next set of parameters *after* the current
86
+ set of parameters' gradient computation. This overlaps the *current
87
+ reduce-scatter* and the *next gradient computation*, and it frees the
88
+ current set of parameters before allocating memory for the next set of
89
+ parameters, only holding the next set of parameters and current set of
90
+ gradients in memory at the peak.
91
+ - FSDP's ``backward_prefetch`` argument accepts ``None``, which disables
92
+ the backward prefetching altogether. This has no overlap and does not
93
+ increase memory usage. In general, we do not recommend this setting since
94
+ it may degrade throughput significantly.
95
+
96
+ For more technical context: For a single process group using NCCL backend,
97
+ any collectives, even if issued from different streams, contend for the
98
+ same per-device NCCL stream, which implies that the relative order in which
99
+ the collectives are issued matters for overlapping. The two backward
100
+ prefetching values correspond to different issue orders.
101
+ """
102
+
103
+ # NOTE: For both modes, the ordering that defines "current" and "next" is
104
+ # not always exact in the current implementation. A mistargeted prefetch
105
+ # simply means that the parameter memory is allocated earlier than needed,
106
+ # possibly increasing peak memory usage, but does not affect correctness.
107
+ BACKWARD_PRE = auto()
108
+ BACKWARD_POST = auto()
109
+
110
+
111
+ @dataclass
112
+ class MixedPrecision:
113
+ """
114
+ This configures FSDP-native mixed precision training.
115
+
116
+ Attributes:
117
+ param_dtype (Optional[torch.dtype]): This specifies the dtype for model
118
+ parameters during forward and backward and thus the dtype for
119
+ forward and backward computation. Outside forward and backward, the
120
+ *sharded* parameters are kept in full precision (e.g. for the
121
+ optimizer step), and for model checkpointing, the parameters are
122
+ always saved in full precision. (Default: ``None``)
123
+ reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
124
+ gradient reduction (i.e. reduce-scatter or all-reduce). If this is
125
+ ``None`` but ``param_dtype`` is not ``None``, then this takes on
126
+ the ``param_dtype`` value, still running gradient reduction in low
127
+ precision. This is permitted to differ from ``param_dtype``, e.g.
128
+ to force gradient reduction to run in full precision. (Default:
129
+ ``None``)
130
+ buffer_dtype (Optional[torch.dtype]): This specifies the dtype for
131
+ buffers. FSDP does not shard buffers. Rather, FSDP casts them to
132
+ ``buffer_dtype`` in the first forward pass and keeps them in that
133
+ dtype thereafter. For model checkpointing, the buffers are saved
134
+ in full precision except for ``LOCAL_STATE_DICT``. (Default:
135
+ ``None``)
136
+ keep_low_precision_grads (bool): If ``False``, then FSDP upcasts
137
+ gradients to full precision after the backward pass in preparation
138
+ for the optimizer step. If ``True``, then FSDP keeps the gradients
139
+ in the dtype used for gradient reduction, which can save memory if
140
+ using a custom optimizer that supports running in low precision.
141
+ (Default: ``False``)
142
+ cast_forward_inputs (bool): If ``True``, then this FSDP module casts
143
+ its forward args and kwargs to ``param_dtype``. This is to ensure
144
+ that parameter and input dtypes match for forward computation, as
145
+ required by many ops. This may need to be set to ``True`` when only
146
+ applying mixed precision to some but not all FSDP modules, in which
147
+ case a mixed-precision FSDP submodule needs to recast its inputs.
148
+ (Default: ``False``)
149
+ cast_root_forward_inputs (bool): If ``True``, then the root FSDP module
150
+ casts its forward args and kwargs to ``param_dtype``, overriding
151
+ the value of ``cast_forward_inputs``. For non-root FSDP modules,
152
+ this does not do anything. (Default: ``True``)
153
+ _module_classes_to_ignore: (Sequence[Type[nn.Module]]): This specifies
154
+ module classes to ignore for mixed precision when using an
155
+ ``auto_wrap_policy``: Modules of these classes will have FSDP
156
+ applied to them separately with mixed precision disabled (meaning
157
+ that the final FSDP construction would deviate from the specified
158
+ policy). If ``auto_wrap_policy`` is not specified, then this does
159
+ not do anything. This API is experimental and subject to change.
160
+ (Default: ``(_BatchNorm,)``)
161
+
162
+ .. note:: This API is experimental and subject to change.
163
+
164
+ .. note:: Only floating point tensors are cast to their specified dtypes.
165
+
166
+ .. note:: In ``summon_full_params``, parameters are forced to full
167
+ precision, but buffers are not.
168
+
169
+ .. note:: Layer norm and batch norm accumulate in ``float32`` even when
170
+ their inputs are in a low precision like ``float16`` or ``bfloat16``.
171
+ Disabling FSDP's mixed precision for those norm modules only means that
172
+ the affine parameters are kept in ``float32``. However, this incurs
173
+ separate all-gathers and reduce-scatters for those norm modules, which
174
+ may be inefficient, so if the workload permits, the user should prefer
175
+ to still apply mixed precision to those modules.
176
+
177
+ .. note:: By default, if the user passes a model with any ``_BatchNorm``
178
+ modules and specifies an ``auto_wrap_policy``, then the batch norm
179
+ modules will have FSDP applied to them separately with mixed precision
180
+ disabled. See the ``_module_classes_to_ignore`` argument.
181
+
182
+ .. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
183
+ ``cast_forward_inputs=False`` by default. For the root FSDP instance,
184
+ its ``cast_root_forward_inputs`` takes precedence over its
185
+ ``cast_forward_inputs``. For non-root FSDP instances, their
186
+ ``cast_root_forward_inputs`` values are ignored. The default setting is
187
+ sufficient for the typical case where each FSDP instance has the same
188
+ ``MixedPrecision`` configuration and only needs to cast inputs to the
189
+ ``param_dtype`` at the beginning of the model's forward pass.
190
+
191
+ .. note:: For nested FSDP instances with different ``MixedPrecision``
192
+ configurations, we recommend setting individual ``cast_forward_inputs``
193
+ values to configure casting inputs or not before each instance's
194
+ forward. In such a case, since the casts happen before each FSDP
195
+ instance's forward, a parent FSDP instance should have its non-FSDP
196
+ submodules run before its FSDP submodules to avoid the activation dtype
197
+ being changed due to a different ``MixedPrecision`` configuration.
198
+
199
+ Example::
200
+
201
+ >>> # xdoctest: +SKIP("undefined variables")
202
+ >>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
203
+ >>> model[1] = FSDP(
204
+ >>> model[1],
205
+ >>> mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
206
+ >>> )
207
+ >>> model = FSDP(
208
+ >>> model,
209
+ >>> mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
210
+ >>> )
211
+
212
+ The above shows a working example. On the other hand, if ``model[1]``
213
+ were replaced with ``model[0]``, meaning that the submodule using
214
+ different ``MixedPrecision`` ran its forward first, then ``model[1]``
215
+ would incorrectly see ``float16`` activations instead of ``bfloat16``
216
+ ones.
217
+
218
+ """
219
+
220
+ param_dtype: Optional[torch.dtype] = None
221
+ reduce_dtype: Optional[torch.dtype] = None
222
+ buffer_dtype: Optional[torch.dtype] = None
223
+ keep_low_precision_grads: bool = False
224
+ cast_forward_inputs: bool = False
225
+ cast_root_forward_inputs: bool = True
226
+ _module_classes_to_ignore: Sequence[Type[torch.nn.Module]] = (_BatchNorm,)
227
+
228
+
229
+ @dataclass
230
+ class CPUOffload:
231
+ """
232
+ This configures CPU offloading.
233
+
234
+ Attributes:
235
+ offload_params (bool): This specifies whether to offload parameters to
236
+ CPU when not involved in computation. If ``True``, then this
237
+ offloads gradients to CPU as well, meaning that the optimizer step
238
+ runs on CPU.
239
+ """
240
+
241
+ offload_params: bool = False
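(For reference, a minimal illustrative sketch of enabling parameter CPU offloading when wrapping a model; ``module`` here is a placeholder for the model being wrapped:)

>>> # xdoctest: +SKIP("undefined variables")
>>> from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP
>>> # Parameters (and hence gradients) stay on CPU when not in use,
>>> # so the optimizer step also runs on CPU.
>>> fsdp_model = FSDP(module, cpu_offload=CPUOffload(offload_params=True))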
242
+
243
+
244
+ class StateDictType(Enum):
245
+ """
246
+ This enum indicates which type of ``state_dict`` the FSDP module is
247
+ currently processing (returning or loading).
248
+ The default value is FULL_STATE_DICT to comply with the PyTorch convention.
249
+ .. note::
250
+ FSDP currently supports three types of ``state_dict``:
251
+ 1. ``state_dict/load_state_dict``: this pair of APIs return and load
252
+ the non-sharded, unflattened parameters. The semantics are the
253
+ same as using DDP.
254
+ 2. ``_local_state_dict/_load_local_state_dict``: this pair of APIs return
255
+ and load local sharded, flattened parameters. The values returned
256
+ by ``_local_state_dict`` can be directly used by FSDP and are only
257
+ meaningful to FSDP (because parameters are flattened). Note that
258
+ these APIs are meant for use via the :func:`state_dict_type`
259
+ context manager as follows:
260
+ >>> # xdoctest: +SKIP("undefined variables")
261
+ >>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
262
+ ... state = fsdp.state_dict() # loads local state dict
263
+ 3. ``_sharded_state_dict/_load_sharded_state_dict``: this pair of APIs
264
+ return and load sharded, unflattened parameters. The ``state_dict``
265
+ returned by ``_sharded_state_dict`` can be used by all other parallel
266
+ schemes (resharding may be required).
267
+ """
268
+
269
+ FULL_STATE_DICT = auto()
270
+ LOCAL_STATE_DICT = auto()
271
+ SHARDED_STATE_DICT = auto()
272
+
273
+
274
+ @dataclass
275
+ class StateDictConfig:
276
+ """
277
+ ``StateDictConfig`` is the base class for all ``state_dict`` configuration
278
+ classes. Users should instantiate a child class (e.g.
279
+ ``FullStateDictConfig``) in order to configure settings for the
280
+ corresponding ``state_dict`` type supported by FSDP.
281
+
282
+ Attributes:
283
+ offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict
284
+ values to CPU, and if ``False``, then FSDP keeps them on GPU.
285
+ (Default: ``False``)
286
+ """
287
+
288
+ offload_to_cpu: bool = False
289
+
290
+
291
+ @dataclass
292
+ class FullStateDictConfig(StateDictConfig):
293
+ """
294
+ ``FullStateDictConfig`` is a config class meant to be used with
295
+ ``StateDictType.FULL_STATE_DICT``. We recommend enabling both
296
+ ``offload_to_cpu=True`` and ``rank0_only=True`` when saving full state
297
+ dicts to save GPU memory and CPU memory, respectively. This config class
298
+ is meant to be used via the :func:`state_dict_type` context manager as
299
+ follows:
300
+
301
+ >>> # xdoctest: +SKIP("undefined variables")
302
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
303
+ >>> fsdp = FSDP(model, auto_wrap_policy=...)
304
+ >>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
305
+ >>> with FSDP.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg):
306
+ >>> state = fsdp.state_dict()
307
+ >>> # `state` will be empty on ranks other than rank 0 and contain CPU tensors on rank 0.
308
+ >>> # To reload checkpoint for inference, finetuning, transfer learning, etc:
309
+ >>> model = model_fn() # Initialize model in preparation for wrapping with FSDP
310
+ >>> if dist.get_rank() == 0:
311
+ >>> # Load checkpoint only on rank 0 to avoid memory redundancy
312
+ >>> state_dict = torch.load("my_checkpoint.pt")
313
+ >>> model.load_state_dict(state_dict)
314
+ >>> # All ranks initialize FSDP module as usual. `sync_module_states` argument
315
+ >>> # communicates loaded checkpoint states from rank 0 to rest of the world.
316
+ >>> fsdp = FSDP(model, device_id=torch.cuda.current_device(), auto_wrap_policy=..., sync_module_states=True)
317
+ >>> # After this point, all ranks have FSDP model with loaded checkpoint.
318
+
319
+ Attributes:
320
+ rank0_only (bool): If ``True``, then only rank 0 saves the full state
321
+ dict, and nonzero ranks save an empty dict. If ``False``, then all
322
+ ranks save the full state dict. (Default: ``False``)
323
+ """
324
+
325
+ rank0_only: bool = False
326
+
327
+
328
+ @dataclass
329
+ class LocalStateDictConfig(StateDictConfig):
330
+ pass
331
+
332
+
333
+ @dataclass
334
+ class ShardedStateDictConfig(StateDictConfig):
335
+ """
336
+ ``ShardedStateDictConfig`` is a config class meant to be used with
337
+ ``StateDictType.SHARDED_STATE_DICT``.
338
+
339
+ Attributes:
340
+ _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
341
+ as ``DTensor``, and if ``False``, then FSDP saves them as
342
+ ``ShardedTensor``. (Default: ``False``)
343
+
344
+ .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedStateDictConfig`
345
+ and it is used by FSDP to determine the type of state dict values. Users should not
346
+ manually modify ``_use_dtensor``.
347
+ """
348
+
349
+ _use_dtensor: bool = False
350
+
351
+
352
+ @dataclass
353
+ class OptimStateDictConfig:
354
+ """
355
+ ``OptimStateDictConfig`` is the base class for all ``optim_state_dict``
356
+ configuration classes. Users should instantiate a child class (e.g.
357
+ ``FullOptimStateDictConfig``) in order to configure settings for the
358
+ corresponding ``optim_state_dict`` type supported by FSDP.
359
+
360
+ Attributes:
361
+ offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict's
362
+ tensor values to CPU, and if ``False``, then FSDP keeps them on the
363
+ original device (which is GPU unless parameter CPU offloading is
364
+ enabled). (Default: ``True``)
365
+ """
366
+
367
+ offload_to_cpu: bool = True
368
+
369
+
370
+ @dataclass
371
+ class FullOptimStateDictConfig(OptimStateDictConfig):
372
+ """
373
+ Attributes:
374
+ rank0_only (bool): If ``True``, then only rank 0 saves the full state
375
+ dict, and nonzero ranks save an empty dict. If ``False``, then all
376
+ ranks save the full state dict. (Default: ``False``)
377
+ """
378
+
379
+ rank0_only: bool = False
380
+
381
+
382
+ @dataclass
383
+ class LocalOptimStateDictConfig(OptimStateDictConfig):
384
+ offload_to_cpu: bool = False
385
+
386
+
387
+ @dataclass
388
+ class ShardedOptimStateDictConfig(OptimStateDictConfig):
389
+ """
390
+ ``ShardedOptimStateDictConfig`` is a config class meant to be used with
391
+ ``StateDictType.SHARDED_STATE_DICT``.
392
+
393
+ Attributes:
394
+ _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
395
+ as ``DTensor``, and if ``False``, then FSDP saves them as
396
+ ``ShardedTensor``. (Default: ``False``)
397
+
398
+ .. warning:: ``_use_dtensor`` is a private field of :class:`ShardedOptimStateDictConfig`
399
+ and it is used by FSDP to determine the type of state dict values. Users should not
400
+ manually modify ``_use_dtensor``.
401
+ """
402
+
403
+ _use_dtensor: bool = False
404
+
405
+
406
+ @dataclass
407
+ class StateDictSettings:
408
+ state_dict_type: StateDictType
409
+ state_dict_config: StateDictConfig
410
+ optim_state_dict_config: OptimStateDictConfig
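(The configuration classes above are consumed through ``FullyShardedDataParallel``. As a rough, illustrative sketch — assuming an FSDP-wrapped ``model`` and an existing ``optim``, and that the exact checkpointing workflow may differ per use case — a sharded checkpoint save could look like:)

>>> # xdoctest: +SKIP("undefined variables")
>>> from torch.distributed.fsdp import (
>>>     FullyShardedDataParallel as FSDP,
>>>     ShardedOptimStateDictConfig,
>>>     ShardedStateDictConfig,
>>>     StateDictType,
>>> )
>>> with FSDP.state_dict_type(
>>>     model,
>>>     StateDictType.SHARDED_STATE_DICT,
>>>     ShardedStateDictConfig(offload_to_cpu=True),
>>>     ShardedOptimStateDictConfig(offload_to_cpu=True),
>>> ):
>>>     model_state = model.state_dict()
>>>     optim_state = FSDP.optim_state_dict(model, optim)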
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/fully_sharded_data_parallel.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/sharded_grad_scaler.py ADDED
@@ -0,0 +1,396 @@
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ from collections import abc, defaultdict
4
+ from typing import Any, Dict, Iterable, List, Optional, overload, Sequence, Tuple, Union
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ from torch.amp.grad_scaler import _MultiDeviceReplicator, GradScaler, OptState
9
+ from torch.distributed.distributed_c10d import ProcessGroup
10
+
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
+ def _refresh_per_optimizer_state() -> Dict[str, Any]:
16
+ return {"stage": OptState.READY, "found_inf_per_device": {}}
17
+
18
+
19
+ def _is_supported_device(tensor: torch.Tensor) -> bool:
20
+ return tensor.is_cuda or tensor.device.type in (
21
+ "xla",
22
+ "cpu",
23
+ "hpu",
24
+ "mtia",
25
+ torch._C._get_privateuse1_backend_name(),
26
+ )
27
+
28
+
29
+ class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator):
30
+ """
31
+ Lazily serves the tensor to the requested device. This class extends
32
+ _MultiDeviceReplicator to allow support for "cpu" as a device.
33
+ """
34
+
35
+ def __init__(self, master_tensor: torch.Tensor) -> None:
36
+ assert _is_supported_device(master_tensor)
37
+ self.master = master_tensor
38
+ self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
39
+
40
+
41
+ class ShardedGradScaler(GradScaler):
42
+ """
43
+ ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends
44
+ functionality from GradScaler:
45
+ * Supports PyTorch DDP and FSDP implementations
46
+ * Supports CPU offloaded tensors (as used in fully sharded data parallel [FSDP])
47
+ * Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns
48
+ * Syncs inf/nan for scaled gradient tensors on any torch.device (where tensors are placed) across
49
+ nodes
50
+
51
+ Example::
52
+
53
+ # Creates a ShardedGradScaler once at the beginning of training.
54
+ scaler = ShardedGradScaler()
55
+
56
+ for epoch in epochs:
57
+ for input, target in data:
58
+ optimizer.zero_grad()
59
+ output = model(input)
60
+ loss = loss_fn(output, target)
61
+
62
+ # Scales loss. Calls backward() on scaled loss to create scaled gradients.
63
+ scaler.scale(loss).backward()
64
+
65
+ # scaler.step() first unscales gradients of the optimizer's params.
66
+ # If gradients don't contain infs/NaNs, optimizer.step() is then called,
67
+ # otherwise, optimizer.step() is skipped.
68
+ scaler.step(optimizer)
69
+
70
+ # Updates the scale for next iteration.
71
+ scaler.update()
72
+
73
+ See :class:`GradScaler` for explanation of scaling/unscaling and more use cases.
74
+
75
+ Args:
76
+ init_scale (float, optional, default=2.**16): Initial scale factor.
77
+ growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
78
+ :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
79
+ backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
80
+ :meth:`update` if inf/NaN gradients occur in an iteration.
81
+ growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
82
+ that must occur for the scale to be multiplied by ``growth_factor``.
83
+ enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
84
+ invokes the underlying ``optimizer.step()``, and other methods become no-ops.
85
+ Default: ``True``
86
+ process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD):
87
+ process group for sharding
88
+ """
89
+
90
+ def __init__(
91
+ self,
92
+ device: str = "cuda",
93
+ init_scale: float = 2.0**16,
94
+ backoff_factor: float = 0.5,
95
+ growth_factor: float = 2.0,
96
+ growth_interval: int = 2000,
97
+ enabled: bool = True,
98
+ process_group: Optional[ProcessGroup] = dist.group.WORLD,
99
+ ) -> None:
100
+ super().__init__(
101
+ device,
102
+ init_scale=init_scale,
103
+ backoff_factor=backoff_factor,
104
+ growth_factor=growth_factor,
105
+ growth_interval=growth_interval,
106
+ enabled=enabled,
107
+ )
108
+ if self._enabled:
109
+ self.process_group = process_group
110
+ self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
111
+
112
+ @overload
113
+ def scale(self, outputs: torch.Tensor) -> torch.Tensor:
114
+ ...
115
+
116
+ @overload
117
+ def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]:
118
+ ...
119
+
120
+ @overload
121
+ def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
122
+ ...
123
+
124
+ @overload
125
+ def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]:
126
+ ...
127
+
128
+ def scale(
129
+ self, outputs: Union[torch.Tensor, Iterable[torch.Tensor]]
130
+ ) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
131
+ if not self._enabled:
132
+ return outputs
133
+
134
+ if isinstance(outputs, torch.Tensor):
135
+ assert _is_supported_device(outputs)
136
+ if self._scale is None:
137
+ self._lazy_init_scale_growth_tracker(outputs.device)
138
+ assert self._scale is not None
139
+ scaled_output = outputs * self._scale.to(
140
+ device=outputs.device, non_blocking=True
141
+ )
142
+ # Here we ensure the return dtype is the same as the outputs dtype.
143
+ # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
144
+ # format (fp16, bf16) and so the scaled loss should be of the same dtype.
145
+ return scaled_output.type(outputs.dtype)
146
+
147
+ stash: List[_GeneralMultiDeviceReplicator] = []
148
+
149
+ def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
150
+ if isinstance(val, torch.Tensor):
151
+ assert _is_supported_device(val)
152
+ if len(stash) == 0:
153
+ if self._scale is None:
154
+ self._lazy_init_scale_growth_tracker(val.device)
155
+ assert self._scale is not None
156
+ stash.append(_GeneralMultiDeviceReplicator(self._scale))
157
+ scaled_val = val * stash[0].get(val.device)
158
+ # Here we ensure the return dtype is the same as the outputs dtype.
159
+ # For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
160
+ # format (fp16, bf16) and so the scaled loss should be of the same dtype.
161
+ return scaled_val.type(val.dtype)
162
+ if isinstance(val, abc.Iterable):
163
+ iterator = map(apply_scale, val)
164
+ if isinstance(val, (list, tuple)):
165
+ return type(val)(iterator)
166
+ return iterator
167
+ raise ValueError("outputs must be a Tensor or an iterable of Tensors")
168
+
169
+ return apply_scale(outputs)
170
+
171
+ def _foreach_non_finite_check_and_unscale_cpu_(
172
+ self,
173
+ grads: Sequence[torch.Tensor],
174
+ found_inf: torch.Tensor,
175
+ inv_scale: torch.Tensor,
176
+ ) -> None:
177
+ if len(grads) == 0:
178
+ return
179
+ assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."
180
+ assert found_inf.numel() == 1, "found_inf must be a 1-element tensor."
181
+
182
+ for grad in grads:
183
+ if grad.device.type != "cpu":
184
+ logger.error(
185
+ "tensor device is %s but was expected to be ``cpu``",
186
+ grad.device,
187
+ )
188
+ raise ValueError(
189
+ "Gradients were found on a non-CPU device when"
190
+ " expected to be on CPU."
191
+ )
192
+ if (
193
+ torch.isinf(grad).any().item() is True
194
+ or torch.isnan(grad).any().item() is True
195
+ ):
196
+ found_inf.data = torch.tensor([1.0])
197
+ break
198
+ else:
199
+ grad.data *= inv_scale.item()
200
+
201
+ def _unscale_grads_(
202
+ self,
203
+ optimizer: torch.optim.Optimizer,
204
+ inv_scale: torch.Tensor,
205
+ found_inf: torch.Tensor,
206
+ allow_fp16: bool = True,
207
+ ) -> Dict[torch.device, torch.Tensor]:
208
+ per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
209
+ per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
210
+
211
+ # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
212
+ # There could be thousands of grads, so we'd like to iterate through them just once.
213
+ # However, we don't know their devices or dtypes in advance.
214
+
215
+ # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
216
+ # Google says mypy struggles with defaultdicts type annotations.
217
+ per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
218
+ with torch.no_grad():
219
+ for group in optimizer.param_groups:
220
+ for param in group["params"]:
221
+ if param.grad is None:
222
+ continue
223
+ if (not allow_fp16) and param.grad.dtype == torch.float16:
224
+ raise ValueError("Attempting to unscale FP16 gradients.")
225
+ if param.grad.is_sparse:
226
+ # is_coalesced() == False means the sparse grad has values with duplicate indices.
227
+ # coalesce() deduplicates indices and adds all values that have the same index.
228
+ # For scaled fp16 values, there's a good chance coalescing will cause overflow,
229
+ # so we should check the coalesced _values().
230
+ if param.grad.dtype is torch.float16:
231
+ # coalesce is not supported in torch.float16
232
+ param_grad_fp32 = param.grad.type(torch.float32).coalesce()
233
+ param.grad = param_grad_fp32.type(torch.float16)
234
+ to_unscale = param.grad._values()
235
+ else:
236
+ to_unscale = param.grad
237
+
238
+ per_device_and_dtype_grads[to_unscale.device][
239
+ to_unscale.dtype
240
+ ].append(to_unscale)
241
+
242
+ for device, per_dtype_grads in per_device_and_dtype_grads.items():
243
+ for grads in per_dtype_grads.values():
244
+ if grads[0].device.type == "cpu":
245
+ self._foreach_non_finite_check_and_unscale_cpu_(
246
+ grads,
247
+ per_device_found_inf.get(device),
248
+ per_device_inv_scale.get(device),
249
+ )
250
+ else:
251
+ torch._amp_foreach_non_finite_check_and_unscale_(
252
+ grads,
253
+ per_device_found_inf.get(device),
254
+ per_device_inv_scale.get(device),
255
+ )
256
+ # There exist contexts (e.g. w/ `use_orig_params=True`) wherein some
257
+ # ranks may have no (non-zero sized) parameter shards, necessitating the
258
+ # initialization of `per_device_found_inf._per_device_tensors` here
259
+ if not per_device_found_inf._per_device_tensors:
260
+ assert self._scale is not None
261
+ per_device_found_inf.get(self._scale.device)
262
+ return per_device_found_inf._per_device_tensors
263
+
264
+ def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
265
+ if not self._enabled:
266
+ return
267
+
268
+ self._check_scale_growth_tracker("unscale_")
269
+
270
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
271
+
272
+ if optimizer_state["stage"] is OptState.UNSCALED:
273
+ raise RuntimeError(
274
+ "unscale_() has already been called on this optimizer since the last update()."
275
+ )
276
+ elif optimizer_state["stage"] is OptState.STEPPED:
277
+ raise RuntimeError("unscale_() is being called after step().")
278
+
279
+ # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
280
+ assert self._scale is not None
281
+ inv_scale = self._scale.double().reciprocal().float()
282
+ found_inf = torch.full(
283
+ (1,), 0.0, dtype=torch.float32, device=self._scale.device
284
+ )
285
+
286
+ optimizer_state["found_inf_per_device"] = self._unscale_grads_(
287
+ optimizer, inv_scale, found_inf, True
288
+ )
289
+ optimizer_state["stage"] = OptState.UNSCALED
290
+
291
+ # Synchronize the detected inf across the ranks
292
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
293
+ works = []
294
+ found_inf_on_cpus = []
295
+ found_inf_on_devices = []
296
+
297
+ for found_inf in optimizer_state["found_inf_per_device"].values():
298
+ if self._device != "cpu" and found_inf.device.type == "cpu":
299
+ found_inf_on_cpus.append(found_inf)
300
+ found_inf_on_device = found_inf.to(self._device)
301
+ found_inf_on_devices.append(found_inf_on_device)
302
+ works.append(
303
+ dist.all_reduce(
304
+ found_inf_on_device, async_op=True, group=self.process_group
305
+ )
306
+ )
307
+ else:
308
+ works.append(
309
+ dist.all_reduce(found_inf, async_op=True, group=self.process_group)
310
+ )
311
+ for work in works:
312
+ work.wait()
313
+ if found_inf_on_cpus:
314
+ torch._foreach_copy_(found_inf_on_cpus, found_inf_on_devices)
315
+
316
+ def _amp_update_scale_cpu_(self, found_inf: torch.Tensor) -> None:
317
+ """
318
+ If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
319
+ Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
320
+ """
321
+ assert self._scale is not None and self._growth_tracker is not None
322
+
323
+ if found_inf.item() >= 1.0:
324
+ self._scale *= self._backoff_factor
325
+ self._growth_tracker.fill_(0)
326
+ else:
327
+ successful = self._growth_tracker + 1
328
+ if successful == self._growth_interval:
329
+ self._scale *= self._growth_factor
330
+ self._growth_tracker.fill_(0)
331
+ else:
332
+ self._growth_tracker = successful
333
+
334
+ def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
335
+ """
336
+ Updates the scale factor.
337
+ If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
338
+ to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
339
+ the scale is multiplied by ``growth_factor`` to increase it.
340
+ Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
341
+ used directly, it's used to fill GradScaler's internal scale tensor. So if
342
+ ``new_scale`` was a tensor, later in-place changes to that tensor will not further
343
+ affect the scale GradScaler uses internally.)
344
+ Args:
345
+ new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor.
346
+ .. warning::
347
+ :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
348
+ been invoked for all optimizers used this iteration.
349
+ """
350
+
351
+ if not self._enabled:
352
+ return
353
+
354
+ _scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated]
355
+
356
+ if new_scale is not None:
357
+ # Accept a new user-defined scale.
358
+ if isinstance(new_scale, float):
359
+ self._scale.fill_(new_scale) # type: ignore[union-attr]
360
+ else:
361
+ reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \
362
+ torch.FloatTensor with requires_grad=False."
363
+ assert new_scale.device.type == self._device, reason
364
+ assert new_scale.numel() == 1, reason
365
+ assert new_scale.requires_grad is False, reason
366
+ self._scale.copy_(new_scale) # type: ignore[union-attr]
367
+ else:
368
+ # Consume shared inf/nan data collected from optimizers to update the scale.
369
+ # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
370
+ found_infs = [
371
+ found_inf.to(device=_scale.device, non_blocking=True)
372
+ for state in self._per_optimizer_states.values()
373
+ for found_inf in state["found_inf_per_device"].values()
374
+ ]
375
+
376
+ assert len(found_infs) > 0, "No inf checks were recorded prior to update."
377
+
378
+ found_inf_combined = found_infs[0]
379
+ if len(found_infs) > 1:
380
+ for i in range(1, len(found_infs)):
381
+ found_inf_combined += found_infs[i]
382
+
383
+ if _scale.device.type == "cpu":
384
+ self._amp_update_scale_cpu_(found_inf_combined)
385
+ else:
386
+ torch._amp_update_scale_(
387
+ self._scale, # type: ignore[arg-type]
388
+ self._growth_tracker, # type: ignore[arg-type]
389
+ found_inf_combined,
390
+ self._growth_factor, # type: ignore[arg-type]
391
+ self._backoff_factor, # type: ignore[arg-type]
392
+ self._growth_interval, # type: ignore[arg-type]
393
+ )
394
+
395
+ # To prepare for next iteration, clear the data collected from optimizers this iteration.
396
+ self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
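(Since ``unscale_`` above follows the standard ``GradScaler`` contract, the usual unscale-then-clip pattern applies. A minimal sketch — ``model``, ``optimizer``, ``loss_fn``, and ``data`` are assumed to exist, and ``model`` is assumed to be FSDP-wrapped so clipping goes through ``model.clip_grad_norm_``:)

>>> # xdoctest: +SKIP("undefined variables")
>>> from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
>>> scaler = ShardedGradScaler()
>>> for inputs, targets in data:
>>>     optimizer.zero_grad()
>>>     loss = loss_fn(model(inputs), targets)
>>>     scaler.scale(loss).backward()
>>>     # Unscale first so clipping sees the true gradient magnitudes.
>>>     scaler.unscale_(optimizer)
>>>     model.clip_grad_norm_(max_norm=1.0)
>>>     scaler.step(optimizer)  # skipped internally if an inf/NaN was found
>>>     scaler.update()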
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/fsdp/wrap.py ADDED
@@ -0,0 +1,608 @@
1
+ # mypy: allow-untyped-defs
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ #
4
+ # This source code is licensed under the BSD license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import contextlib
8
+ import copy
9
+ from abc import ABC, abstractmethod
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ cast,
14
+ Dict,
15
+ Generator,
16
+ Iterable,
17
+ Optional,
18
+ Sequence,
19
+ Set,
20
+ Tuple,
21
+ Type,
22
+ Union,
23
+ )
24
+
25
+ import torch.nn as nn
26
+
27
+
28
+ __all__ = [
29
+ "always_wrap_policy",
30
+ "lambda_auto_wrap_policy",
31
+ "transformer_auto_wrap_policy",
32
+ "size_based_auto_wrap_policy",
33
+ "enable_wrap",
34
+ "wrap",
35
+ "CustomPolicy",
36
+ "ModuleWrapPolicy",
37
+ ]
38
+
39
+
40
+ # NOTE: We intentionally keep this function simple and isolate the complexity
41
+ # to `fn` to enable using this function generically. We may move this to a
42
+ # non-FSDP-specific folder and/or make it public in the future.
43
+ def _post_order_apply(
44
+ root_module: nn.Module,
45
+ fn: Callable[[nn.Module], Optional[nn.Module]],
46
+ ):
47
+ """
48
+ This applies ``fn`` to every module in the module tree of ``root_module``
49
+ following a post-order traversal. If ``fn`` returns an :class:`nn.Module`,
50
+ then this replaces the original module with the newly returned one in the
51
+ tree. Otherwise, ``fn`` should return ``None``, in which case the module is
52
+ not changed.
53
+ """
54
+ # Track visited modules to avoid visiting shared modules multiple times
55
+ visited_modules: Set[nn.Module] = {root_module}
56
+
57
+ def _post_order_apply_inner(
58
+ module: nn.Module,
59
+ module_name: str,
60
+ parent_module: Optional[nn.Module],
61
+ ):
62
+ for child_module_name, child_module in module.named_children():
63
+ if child_module not in visited_modules:
64
+ visited_modules.add(child_module)
65
+ _post_order_apply_inner(child_module, child_module_name, module)
66
+ optional_module = fn(module)
67
+ if optional_module is not None:
68
+ assert isinstance(parent_module, nn.Module), (
69
+ "Non-root modules should have their parent module set but got "
70
+ f"{parent_module} for {module}"
71
+ )
72
+ assert module_name, (
73
+ "Non-root modules should have their module name set but got "
74
+ f"an empty module name for {module}"
75
+ )
76
+ assert isinstance(
77
+ optional_module, nn.Module
78
+ ), f"fn should return None or an nn.Module but got {optional_module}"
79
+ setattr(parent_module, module_name, optional_module)
80
+
81
+ _post_order_apply_inner(root_module, "", None)
82
+
83
+
84
+ def _construct_wrap_fn(
85
+ root_module: nn.Module,
86
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]],
87
+ fsdp_fn: Callable,
88
+ ) -> Callable[[nn.Module], Optional[nn.Module]]:
89
+ """
90
+ This constructs the "wrap" function to pass to :func:`_post_order_apply`
91
+ based on ``target_module_to_kwargs``, which should be constructed from the
92
+ wrapping policy.
93
+ """
94
+
95
+ def fn(module: nn.Module) -> Optional[nn.Module]:
96
+ # Explicitly avoid wrapping the root module since for FSDP, it is
97
+ # handled by the caller
98
+ if module in target_module_to_kwargs and module is not root_module:
99
+ kwargs = target_module_to_kwargs[module]
100
+ return fsdp_fn(module, **kwargs)
101
+ return None
102
+
103
+ return fn
104
+
105
+
106
+ def _run_mixed_precision_override_policy(
107
+ root_module: nn.Module,
108
+ module_classes: Iterable[Type[nn.Module]],
109
+ ignored_modules: Set[nn.Module],
110
+ root_kwargs: Dict[str, Any],
111
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]],
112
+ ):
113
+ module_classes_tuple = tuple(set(module_classes))
114
+ for module in root_module.modules():
115
+ if module in ignored_modules:
116
+ continue
117
+ elif isinstance(module, module_classes_tuple):
118
+ # This policy overrides any existing policy
119
+ if module not in target_module_to_kwargs:
120
+ # Only inherit from the root kwargs if not already specified
121
+ target_module_to_kwargs[module] = root_kwargs
122
+ target_module_to_kwargs[module]["mixed_precision"] = None
123
+ return target_module_to_kwargs
124
+
125
+
126
+ def always_wrap_policy(*args, **kwargs) -> bool:
127
+ """
128
+ A simple recursive wrap policy that always returns ``True``. This means
129
+ that every submodule is wrapped by the wrapper class in
130
+ :func:`_recursive_wrap`.
131
+ """
132
+ return True
133
+
134
+
135
+ class _Policy(ABC):
136
+ """
137
+ This defines an abstract base class that represents a policy for applying
138
+ a module-level API.
139
+ """
140
+
141
+ @abstractmethod
142
+ def _run_policy(
143
+ self,
144
+ root_module: nn.Module,
145
+ ignored_modules: Set[nn.Module],
146
+ root_kwargs: Dict[str, Any],
147
+ ) -> Dict[nn.Module, Dict[str, Any]]:
148
+ """
149
+ This should return a dict ``target_module_to_kwargs`` that maps from
150
+ each target module to wrap to its kwargs.
151
+ """
152
+ ...
153
+
154
+
155
+ def _module_wrap_policy(
156
+ module: nn.Module,
157
+ recurse: bool,
158
+ nonwrapped_numel: int,
159
+ module_classes: Set[Type[nn.Module]],
160
+ ) -> bool:
161
+ """
162
+ This auto wrap policy wraps every module that is an instance of any type in
163
+ ``module_classes`` as its own FSDP instance. The root module given by
164
+ ``module`` is always wrapped as an FSDP instance regardless. Since the
165
+ wrapping proceeds bottom up, each FSDP instance manages the parameters in
166
+ its subtree excluding any already managed by a child FSDP instance.
167
+
168
+ Args:
169
+ module (nn.Module): Current module being considered.
170
+ recurse (bool): If ``False``, then this function must decide whether
171
+ ``module`` should be wrapped as an FSDP instance or not. If
172
+ ``True``, then the function is still recursing down the module
173
+ tree as a part of the DFS.
174
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
175
+ module_classes (Set[Type[nn.Module]]): Set of module classes that are
176
+ wrapped as FSDP instances.
177
+
178
+ Returns:
179
+ ``True`` if ``recurse=True``, and whether ``module`` should be wrapped
180
+ if ``recurse=False``.
181
+ """
182
+ if recurse:
183
+ return True # always recurse
184
+ return isinstance(module, tuple(module_classes))
185
+
186
+
187
+ class ModuleWrapPolicy(_Policy):
188
+ """
189
+ This policy applies to every module of the specified module classes,
190
+ passing in the kwargs given to the root.
191
+ """
192
+
193
+ def __init__(self, module_classes: Iterable[Type[nn.Module]]):
194
+ module_classes_set = set(module_classes)
195
+ self._module_classes = module_classes_set
196
+ self._module_classes_str = str(module_classes_set)
197
+
198
+ def _run_policy(
199
+ self,
200
+ root_module: nn.Module,
201
+ ignored_modules: Set[nn.Module],
202
+ root_kwargs: Dict[str, Any],
203
+ ) -> Dict[nn.Module, Dict[str, Any]]:
204
+ module_classes = tuple(self._module_classes)
205
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {}
206
+ for module in root_module.modules():
207
+ if module in ignored_modules:
208
+ continue
209
+ elif isinstance(module, module_classes):
210
+ # Shallow copy to avoid coupling changes across modules
211
+ target_module_to_kwargs[module] = copy.copy(root_kwargs)
212
+ return target_module_to_kwargs
213
+
214
+ def __call__(self, module, recurse, *args, **kwargs):
215
+ # nonwrapped_numel is not used.
216
+ return _module_wrap_policy(
217
+ module, recurse, nonwrapped_numel=-1, module_classes=self._module_classes
218
+ )
219
+
220
+ def __repr__(self) -> str:
221
+ return super().__repr__() + f"({self._module_classes_str})"
222
+
223
+
224
+ class CustomPolicy(_Policy):
225
+ """
226
+ This policy takes in a lambda function that maps a given ``nn.Module`` to
227
+ either ``False``, ``True``, or a kwarg dictionary.
228
+ - If the function returns ``False`` or an empty dictionary, then the module
229
+ does not have the API applied.
230
+ - If the function returns ``True``, then the module has the API applied
231
+ with the root's kwargs.
232
+ - If the function returns a non-empty dictionary, then the module has the
233
+ API applied, and the dictionary overrides the root's kwargs.
234
+
235
+ Example::
236
+
237
+ >>> # xdoctest: +SKIP("undefined variables")
238
+ >>> model = init_transformer_model(...)
239
+ >>> def lambda_fn(module: nn.Module):
240
+ >>> if module is model.lm_head:
241
+ >>> return {"sharding_strategy": ShardingStrategy.SHARD_GRAD_OP}
242
+ >>> elif isinstance(module, TransformerBlock):
243
+ >>> return True
244
+ >>> return False
245
+ >>> policy = CustomPolicy(lambda_fn)
246
+ >>> fsdp_model = FSDP(model, auto_wrap_policy=policy)
247
+ """
248
+
249
+ def __init__(self, lambda_fn: Callable[[nn.Module], Union[bool, Dict[str, Any]]]):
250
+ self._lambda_fn = lambda_fn
251
+
252
+ def _run_policy(
253
+ self,
254
+ root_module: nn.Module,
255
+ ignored_modules: Set[nn.Module],
256
+ root_kwargs: Dict[str, Any],
257
+ ) -> Dict[nn.Module, Dict[str, Any]]:
258
+ target_module_to_kwargs: Dict[nn.Module, Dict[str, Any]] = {}
259
+ for module in root_module.modules():
260
+ if module in ignored_modules:
261
+ continue
262
+ res = self._lambda_fn(module)
263
+ if not isinstance(res, (dict, bool)):
264
+ raise ValueError(
265
+ "The lambda_fn passed to CustomPolicy should return "
266
+ f"False/True or a kwarg dict, but it returned {res}"
267
+ )
268
+ if not res:
269
+ continue
270
+ kwargs = copy.copy(root_kwargs)
271
+ if isinstance(res, dict):
272
+ # Override the root kwargs with the ones specified by the
273
+ # lambda function
274
+ kwargs.update(res)
275
+ target_module_to_kwargs[module] = kwargs
276
+ return target_module_to_kwargs
277
+
278
+
279
+ def lambda_auto_wrap_policy(
280
+ module: nn.Module, recurse: bool, nonwrapped_numel: int, lambda_fn: Callable
281
+ ) -> bool:
282
+ """
283
+ A convenient auto wrap policy to wrap submodules based on an arbitrary user
284
+ function. If `lambda_fn(submodule) == True``, the submodule will be wrapped as
285
+ a `wrapper_cls` unit.
286
+
287
+ Return if a module should be wrapped during auto wrapping.
288
+
289
+ The first three parameters are required by :func:`_recursive_wrap`.
290
+
291
+ Args:
292
+ module (nn.Module): Current module being considered.
293
+ recurse (bool): If ``False``, then this function must decide whether
294
+ ``module`` should be wrapped as an FSDP instance or not. If
295
+ ``True``, then the function is still recursing down the module
296
+ tree as a part of the DFS.
297
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
298
+
299
+ lambda_fn (Callable[[nn.Module], bool]): If this returns ``True``, then
300
+ this module will be wrapped.
301
+ """
302
+ if recurse:
303
+ return True # always recurse
304
+ return lambda_fn(module)
305
+
306
+
307
+ def transformer_auto_wrap_policy(
308
+ module: nn.Module,
309
+ recurse: bool,
310
+ nonwrapped_numel: int,
311
+ transformer_layer_cls: Set[Type[nn.Module]],
312
+ ) -> bool:
313
+ """
314
+ See :func:`_module_wrap_policy`, where ``transformer_layer_cls`` is the
315
+ same as ``module_classes``. Note that shared parameters must be wrapped in
316
+ the same FSDP instance, so this auto wrap policy can help wrap shared
317
+ embeddings into the same FSDP instance for transformer models.
318
+ """
319
+ return _module_wrap_policy(module, recurse, nonwrapped_numel, transformer_layer_cls)
320
+
321
+
322
+ def _wrap_module_cls_individually(
323
+ module: nn.Module, module_classes: Sequence[type], recurse: bool, *args, **kwargs
324
+ ):
325
+ if recurse:
326
+ # always recurse
327
+ return True
328
+ else:
329
+ # if not recursing, decide whether we should wrap based on whether the type of module
330
+ # is in `module_classes`.
331
+ return isinstance(module, tuple(module_classes))
332
+
333
+
334
+ def _or_policy(
335
+ module: nn.Module,
336
+ recurse: bool,
337
+ nonwrapped_numel: int,
338
+ policies,
339
+ ) -> bool:
340
+ """
341
+ A policy that wraps ``module`` if any policy in the passed in iterable of
342
+ ``policies`` returns ``True``.
343
+ """
344
+ return any(
345
+ policy(module=module, recurse=recurse, nonwrapped_numel=nonwrapped_numel)
346
+ for policy in policies
347
+ )
348
+
349
+
350
+ def size_based_auto_wrap_policy(
351
+ module: nn.Module,
352
+ recurse: bool,
353
+ nonwrapped_numel: int,
354
+ # Additional custom arguments
355
+ min_num_params: int = int(1e8),
356
+ force_leaf_modules: Optional[Set[Type[nn.Module]]] = None,
357
+ exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None,
358
+ ) -> bool:
359
+ """
360
+ A size-based auto wrap policy.
361
+
362
+ Args:
363
+ module (nn.Module): Current module being considered.
364
+ recurse (bool): If ``False``, then this function must decide whether
365
+ ``module`` should be wrapped as an FSDP instance or not. If
366
+ ``True``, then the function is still recursing down the module
367
+ tree as a part of the DFS.
368
+ nonwrapped_numel (int): Parameter numel not yet wrapped.
369
+
370
+ min_num_params (int): Customizable policy input that controls the size
371
+ threshold over which a module is ready to be wrapped. This is in
372
+ units of numel.
373
+ force_leaf_modules (Set[Type[nn.Module]]): Set of module types to keep
374
+ as leaves, i.e. their children will never be wrapped.
375
+ exclude_wrap_modules (Set[Type[nn.Module]]): Set of module types to be
376
+ excluded in wrapping.
377
+
378
+ Returns:
379
+ Whether ``module`` should be wrapped.
380
+ """
381
+ force_leaf_modules = (
382
+ size_based_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore[attr-defined]
383
+ if force_leaf_modules is None
384
+ else force_leaf_modules
385
+ )
386
+ exclude_wrap_modules = (
387
+ size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore[attr-defined]
388
+ if exclude_wrap_modules is None
389
+ else exclude_wrap_modules
390
+ )
391
+
392
+ # Keep the argument `min_num_params` for BC for now, but it represents the
393
+ # minimum non-wrapped *numel* before triggering a wrapping
394
+ min_nonwrapped_numel = min_num_params
395
+ is_large = nonwrapped_numel >= min_nonwrapped_numel
396
+ if recurse:
397
+ # We should recurse if the module is big enough but not in force_leaf_modules list.
398
+ return is_large and not isinstance(module, tuple(force_leaf_modules))
399
+ else:
400
+ # If we are not recursing, determine if we should wrap.
401
+ return is_large and not isinstance(module, tuple(exclude_wrap_modules))
402
+
403
+
404
+ # Set those defaults to the size_based_auto_wrap_policy function. Make them easy to be imported.
405
+ size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES = {nn.ModuleList, nn.ModuleDict} # type: ignore[attr-defined]
406
+ size_based_auto_wrap_policy.FORCE_LEAF_MODULES = {nn.MultiheadAttention} # type: ignore[attr-defined]
407
+
408
+
409
+ @contextlib.contextmanager
410
+ def enable_wrap(
411
+ *, wrapper_cls: Any, **wrapper_kwargs: Any
412
+ ) -> Generator[None, None, None]:
413
+ """
414
+ Context manager to wrap modules using a wrapper.
415
+
416
+ Useful for when you'd like to apply the same configuration arguments to all
417
+ child modules that you wrap. A particularly important use case is wrapping
418
+ large layers so that they get sharded (in-place) during initialization, to
419
+ avoid running out of system memory. Large layers can indicate that they
420
+ should be sharded via the ``wrap`` annotation and this context manager can
421
+ provide the exact configuration for these nested instances.
422
+
423
+ Usage::
424
+
425
+ with enable_wrap(wrapper_cls, **params):
426
+ # Wraps layer in FSDP by default if within context
427
+ self.l1 = wrap(torch.nn.Linear(5, 5))
428
+
429
+ Args:
430
+ wrapper_cls:
431
+ Class that `wrap` annotation will `wrap` modules with, such as
432
+ `FullyShardedDataParallel`.
433
+ **wrapper_kwargs:
434
+ Configuration settings that will be passed to all ``wrap``
435
+ instances inside the context
436
+ """
437
+ kwargs = {
438
+ "wrapper_cls": wrapper_cls,
439
+ **wrapper_kwargs,
440
+ }
441
+ with _ConfigAutoWrap(**kwargs):
442
+ yield
443
+
444
+
445
+ def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module:
446
+ """
447
+ Annotate that a module should be wrapped. Annotated modules will only be
448
+ wrapped if inside of an :func:`enable_wrap` context manager. This allows
449
+ a module to be initialized both with and without a wrapper without code
450
+ change.
451
+
452
+ The class that this function wraps the passed in ``nn.Module`` with is the
453
+ passed in ``wrapper_cls`` argument into ``enable_wrap``. Both
454
+ ``enable_wrap`` and ``wrap`` can take in kwargs specifying how to construct
455
+ the ``wrapper_cls`` instance. In the case of duplicate kwargs in
456
+ ``enable_wrap`` and ``wrap``, the argument passed into ``wrap`` will be
457
+ respected.
458
+
459
+ Usage::
460
+
461
+ with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
462
+ # Wraps layer in FSDP by default if within context
463
+ self.l1 = wrap(torch.nn.Linear(5, 5))
464
+
465
+ Args:
466
+ module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
467
+ **wrap_overrides: configuration overrides that will take priority over
468
+ the values provided by the :func:`enable_wrap` context
469
+ """
470
+ if _ConfigAutoWrap.in_autowrap_context:
471
+ assert _ConfigAutoWrap.wrapper_cls is not None
472
+
473
+ wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides}
474
+ return _wrap(
475
+ module,
476
+ _ConfigAutoWrap.wrapper_cls,
477
+ **wrap_overrides,
478
+ )
479
+ return module
480
+
481
+
482
+ def _wrap(module: nn.Module, wrapper_cls: Callable, **kwargs) -> nn.Module:
483
+ assert wrapper_cls is not None
484
+ if hasattr(module, "_wrap_overrides"):
485
+ # If module has a _wrap_overrides attribute, we force overriding the
486
+ # FSDP config with these attributes for this module. Currently this
487
+ # is only used to disable mixed precision for BatchNorm when
488
+ # auto_wrapping.
489
+ overrides = {**kwargs, **module._wrap_overrides} # type: ignore[arg-type]
490
+ return wrapper_cls(module, **overrides)
491
+
492
+ return wrapper_cls(module, **kwargs)
493
+
494
+
495
+ def _recursive_wrap(
496
+ module: nn.Module,
497
+ auto_wrap_policy: Callable,
498
+ wrapper_cls: Callable,
499
+ ignored_modules: Set[nn.Module],
500
+ ignored_params: Set[nn.Parameter],
501
+ only_wrap_children: bool = False,
502
+ **kwargs: Any,
503
+ ) -> Tuple[nn.Module, int]:
504
+ """
505
+ Wraps submodules of ``module`` for which ``auto_wrap_policy`` returns
506
+ ``True`` with ``wrapper_cls``.
507
+
508
+ Args:
509
+ module (nn.Module): Module to recursively wrap.
510
+ auto_wrap_policy (Callable): A callable representing a policy that
511
+ determines which modules to recursively wrap with ``wrapper_cls``.
512
+ ignored_modules (Set[torch.nn.Module]): Modules to ignore when
513
+ wrapping.
514
+ ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
515
+ wrapping; these should be the parameters contained in the modules
516
+ in ``ignored_modules``.
517
+ Returns:
518
+ (nn.Module, int):
519
+ ``module`` after wrapping and the numel recursively wrapped.
520
+ """
521
+ assert auto_wrap_policy is not None, "Must specify auto_wrap_policy."
522
+ assert wrapper_cls is not None, "Must specify wrapper_cls"
523
+ # Make sure no child is already wrapped.
524
+ for _, child in module.named_modules():
525
+ if child in ignored_modules:
526
+ continue
527
+ try:
528
+ assert not isinstance(child, cast(type, wrapper_cls))
529
+ except TypeError:
530
+ # wrapper_cls is a function as opposed to a class type, just bypass above check.
531
+ pass
532
+
533
+ # We count all params, assuming none of them are already wrapped.
534
+ nonwrapped_numel = sum(
535
+ p.numel() for p in module.parameters() if p not in ignored_params
536
+ )
537
+
538
+ assert auto_wrap_policy is not None
539
+ if auto_wrap_policy(module=module, recurse=True, nonwrapped_numel=nonwrapped_numel):
540
+ total_wrapped_numel = 0
541
+ # Iterate through the children, recursively wrap if necessary
542
+ for name, child in module.named_children():
543
+ if child in ignored_modules:
544
+ continue
545
+ wrapped_child, num_wrapped_params = _recursive_wrap(
546
+ module=child,
547
+ auto_wrap_policy=auto_wrap_policy,
548
+ wrapper_cls=wrapper_cls,
549
+ ignored_modules=ignored_modules,
550
+ ignored_params=ignored_params,
551
+ **kwargs,
552
+ )
553
+ setattr(module, name, wrapped_child)
554
+ # Keep track of how many parameters have been wrapped
555
+ total_wrapped_numel += num_wrapped_params
556
+ # decide if we need to wrap the current module,
557
+ # since the left over parameters exceed the number of params to wrap
558
+ remainder = nonwrapped_numel - total_wrapped_numel
559
+ if not only_wrap_children and auto_wrap_policy(
560
+ module=module, recurse=False, nonwrapped_numel=remainder
561
+ ):
562
+ # Leaf node or final wrapping of the remainder both happen here.
563
+ return _wrap(module, wrapper_cls, **kwargs), nonwrapped_numel
564
+ else:
565
+ return module, total_wrapped_numel
566
+ return module, 0
567
+
568
+
569
+ class _ConfigAutoWrap:
570
+ """
571
+ Helper class to wrap modules based on default config args via a context manager.
572
+ See :func:`enable_wrap` for more information.
573
+ """
574
+
575
+ in_autowrap_context: bool = False # Context flag
576
+ wrapper_cls: Optional[Callable] = None # The wrapper class
577
+ kwargs: Dict[str, Any] = {} # Wrapper's args
578
+
579
+ def __init__(self, **kwargs: Dict[str, Any]):
580
+ self.kwargs = kwargs
581
+
582
+ @staticmethod
583
+ def enable_autowrap_context(kwargs: Any) -> None:
584
+ if _ConfigAutoWrap.in_autowrap_context:
585
+ raise NotImplementedError(
586
+ "You are already within an autowrap context and we currently do not supported nested autowrap."
587
+ )
588
+ _ConfigAutoWrap.in_autowrap_context = True
589
+ # Get and save the wrapper cls for the context.
590
+ assert (
591
+ "wrapper_cls" in kwargs.keys()
592
+ ), "Expected to pass in wrapper_cls arg into _ConfigAutoWrap."
593
+ _ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"])
594
+ del kwargs["wrapper_cls"]
595
+ # Save the rest.
596
+ _ConfigAutoWrap.kwargs = kwargs
597
+
598
+ @staticmethod
599
+ def disable_autowrap_context() -> None:
600
+ _ConfigAutoWrap.in_autowrap_context = False
601
+ _ConfigAutoWrap.wrapper_cls = None
602
+ _ConfigAutoWrap.kwargs = {}
603
+
604
+ def __enter__(self) -> None:
605
+ self.enable_autowrap_context(self.kwargs)
606
+
607
+ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
608
+ self.disable_autowrap_context()
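(For context, the two most common ways of using the policies above with ``FullyShardedDataParallel`` are a module-class policy and a size-based policy. A minimal sketch — ``model`` and ``TransformerBlock`` are placeholders, and the parameter threshold is arbitrary:)

>>> # xdoctest: +SKIP("undefined variables")
>>> import functools
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> from torch.distributed.fsdp.wrap import (
>>>     ModuleWrapPolicy,
>>>     size_based_auto_wrap_policy,
>>> )
>>> # Wrap each TransformerBlock as its own FSDP instance.
>>> fsdp_model = FSDP(model, auto_wrap_policy=ModuleWrapPolicy({TransformerBlock}))
>>> # Or wrap any subtree whose not-yet-wrapped parameter count reaches the threshold.
>>> policy = functools.partial(size_based_auto_wrap_policy, min_num_params=int(1e6))
>>> fsdp_model = FSDP(model, auto_wrap_policy=policy)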
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/api.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/options.cpython-310.pyc ADDED
Binary file (7.16 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/__pycache__/rref_proxy.cpython-310.pyc ADDED
Binary file (2.53 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch
4
+
5
+
6
+ def is_available():
7
+ return hasattr(torch._C, "_faulty_agent_init")
8
+
9
+
10
+ if is_available() and not torch._C._faulty_agent_init():
11
+ raise RuntimeError("Failed to initialize torch.distributed.rpc._testing")
12
+
13
+ if is_available():
14
+ # Registers FAULTY_TENSORPIPE RPC backend.
15
+ from torch._C._distributed_rpc_testing import (
16
+ FaultyTensorPipeAgent,
17
+ FaultyTensorPipeRpcBackendOptions,
18
+ )
19
+
20
+ from . import faulty_agent_backend_registry
infer_4_47_1/lib/python3.10/site-packages/torch/distributed/rpc/_testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (656 Bytes). View file