ZTWHHH committed
Commit f4ecc82 · verified · 1 Parent(s): 1e79187

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py +1 -0
  2. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  3. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py +0 -0
  4. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py +0 -0
  5. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
  6. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc +0 -0
  7. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc +0 -0
  8. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc +0 -0
  9. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc +0 -0
  10. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc +0 -0
  11. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc +0 -0
  12. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py +1 -0
  13. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py +98 -0
  14. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc +0 -0
  15. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc +0 -0
  16. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py +136 -0
  17. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py +66 -0
  18. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py +42 -0
  19. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py +548 -0
  20. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py +51 -0
  21. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py +122 -0
  22. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py +733 -0
  23. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py +0 -0
  24. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py +66 -0
  25. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py +31 -0
  26. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py +543 -0
  27. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py +0 -0
  28. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  29. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py +0 -0
  30. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py +0 -0
  31. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc +0 -0
  32. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py +0 -0
  33. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py +281 -0
  34. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py +0 -0
  35. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc +0 -0
  36. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc +0 -0
  37. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc +0 -0
  38. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py +144 -0
  39. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py +261 -0
  40. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py +326 -0
  41. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py +62 -0
  42. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py +0 -0
  43. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc +0 -0
  44. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc +0 -0
  45. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py +116 -0
  46. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py +1385 -0
  47. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py +218 -0
  48. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py +63 -0
  49. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py +0 -0
  50. infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py +34 -0
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py ADDED
@@ -0,0 +1 @@
+ # mypy: ignore-errors
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py ADDED
The diff for this file is too large to render.
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc ADDED
Binary file (1.45 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc ADDED
Binary file (3.85 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc ADDED
Binary file (2.47 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc ADDED
Binary file (1.4 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc ADDED
Binary file (17.8 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc ADDED
Binary file (4.86 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py ADDED
@@ -0,0 +1 @@
+ # mypy: allow-untyped-defs
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,98 @@
+ # mypy: allow-untyped-defs
+
+ import sys
+ from functools import wraps, partial
+
+ import torch
+ import torch.distributed as dist
+ from torch.distributed import rpc
+ from torch.testing._internal.common_distributed import (
+     MultiProcessTestCase,
+     TEST_SKIPS,
+     tp_transports,
+ )
+
+ TEST_GPU_NUM = 4
+
+ class ShardedTensorTestBase(MultiProcessTestCase):
+     @property
+     def world_size(self):
+         return TEST_GPU_NUM
+
+     def init_pg(self, backend="nccl"):
+         if backend not in ["nccl", "gloo", "mpi"]:
+             raise RuntimeError(f"Backend {backend} not supported!")
+
+         dist.init_process_group(
+             backend=backend,
+             world_size=self.world_size,
+             rank=self.rank,
+             init_method=f"file://{self.file_name}",
+         )
+
+         # set device for nccl pg for collectives
+         if backend == "nccl":
+             torch.cuda.set_device(self.rank)
+
+
+     def init_rpc(self):
+         rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
+         rpc_backend_options.init_method = f"file://{self.file_name}"
+         for rank in range(self.world_size):
+             rpc_backend_options.set_device_map(
+                 f"worker{rank}", {rank: self.rank, self.rank: rank}
+             )
+
+         rpc.init_rpc(
+             name="worker%d" % self.rank,
+             rank=self.rank,
+             world_size=self.world_size,
+             rpc_backend_options=rpc_backend_options,
+         )
+
+     def init_comms(self, init_rpc=True, backend="nccl"):
+         if init_rpc:
+             self.init_rpc()
+         self.init_pg(backend=backend)
+
+     def destroy_comms(self, destroy_rpc=True):
+         # Wait for all ranks to reach here before starting shutdown.
+         dist.barrier()
+
+         if destroy_rpc:
+             rpc.shutdown()
+         dist.destroy_process_group()
+
+     def setUp(self) -> None:
+         super().setUp()
+         self._spawn_processes()
+
+     def assert_sharded_tensor_equal(self, st1, st2):
+         st1_local_shards = st1.local_shards()
+         st2_local_shards = st2.local_shards()
+         self.assertEqual(len(st1_local_shards), len(st2_local_shards))
+         for i, st1_local_shard in enumerate(st1_local_shards):
+             self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
+             self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)
+
+         self.assertEqual(st1.metadata(), st2.metadata())
+         self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
+         self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
+
+ # wrapper to initialize comms (processgroup + rpc)
+ def with_comms(func=None, init_rpc=True, backend="nccl"):
+     if func is None:
+         return partial(
+             with_comms,
+             init_rpc=init_rpc,
+             backend=backend,
+         )
+
+     @wraps(func)
+     def wrapper(self, *args, **kwargs):
+         if backend == "nccl" and torch.cuda.device_count() < self.world_size:
+             sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
+         self.init_comms(init_rpc=init_rpc, backend=backend)
+         func(self, *args, **kwargs)
+         self.destroy_comms(destroy_rpc=init_rpc)
+     return wrapper
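Usage note (not part of the commit): the base class and decorator above are typically consumed as in the following sketch; the test class, tensor sizes, and sharding spec are illustrative assumptions.

import torch
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.distributed._shard.sharded_tensor import (
    ShardedTensorTestBase,
    TEST_GPU_NUM,
    with_comms,
)

class ExampleShardedTensorTest(ShardedTensorTestBase):
    @with_comms(init_rpc=False)          # initialize the NCCL process group, skip RPC
    @skip_if_lt_x_gpu(TEST_GPU_NUM)      # the default world size needs 4 GPUs
    def test_sharded_ones(self):
        spec = ChunkShardingSpec(
            dim=0,
            placements=[f"rank:{r}/cuda:{r}" for r in range(TEST_GPU_NUM)],
        )
        st = sharded_tensor.ones(spec, 8, 8)
        # each of the 4 ranks holds one 2x8 chunk of the 8x8 tensor
        self.assertEqual(st.local_shards()[0].tensor.size(), torch.Size([2, 8]))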
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc ADDED
Binary file (3.04 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc ADDED
Binary file (1.89 kB).
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py ADDED
@@ -0,0 +1,136 @@
+ # mypy: allow-untyped-defs
+
+ import builtins
+
+ import torch
+ from torch.distributed._shard.sharding_spec import (
+     ChunkShardingSpec,
+     EnumerableShardingSpec,
+     ShardMetadata,
+ )
+ from torch.distributed._shard.sharding_spec._internals import (
+     get_chunked_dim_size,
+     get_split_size,
+ )
+
+
+ def generate_chunk_sharding_specs_for_test(sharding_dim):
+     return [
+         ChunkShardingSpec(
+             dim=sharding_dim,
+             placements=[
+                 "rank:0/cuda:0",
+                 "rank:1/cuda:1",
+                 "rank:2/cuda:2",
+                 "rank:3/cuda:3",
+             ],
+         ),
+         # Test different ordering. (Case 1)
+         ChunkShardingSpec(
+             dim=sharding_dim,
+             placements=[
+                 "rank:2/cuda:2",
+                 "rank:3/cuda:3",
+                 "rank:0/cuda:0",
+                 "rank:1/cuda:1",
+             ],
+         ),
+         # Test different ordering. (Case 2)
+         ChunkShardingSpec(
+             dim=sharding_dim,
+             placements=[
+                 "rank:3/cuda:3",
+                 "rank:0/cuda:0",
+                 "rank:1/cuda:1",
+                 "rank:2/cuda:2",
+             ],
+         ),
+     ]
+
+
+ def generate_enumerable_sharding_specs_for_test():
+     return [
+         EnumerableShardingSpec(
+             [
+                 ShardMetadata(
+                     shard_offsets=[0, 0],
+                     shard_sizes=[5, 5],
+                     placement="rank:0/cuda:0",
+                 ),
+                 ShardMetadata(
+                     shard_offsets=[5, 0],
+                     shard_sizes=[5, 5],
+                     placement="rank:1/cuda:1",
+                 ),
+                 ShardMetadata(
+                     shard_offsets=[0, 5],
+                     shard_sizes=[5, 5],
+                     placement="rank:2/cuda:2",
+                 ),
+                 ShardMetadata(
+                     shard_offsets=[5, 5],
+                     shard_sizes=[5, 5],
+                     placement="rank:3/cuda:3",
+                 ),
+             ]
+         )
+     ]
+
+
+ def generate_local_weight_sharding_params_for_test(
+     local_weight, sharded_dim, gpu_num, spec, rank
+ ):
+     """
+     Shard the local weight based on the given spec, so we can compare against
+     the one from the sharded tensor.
+
+     Args:
+         local_weight: weight matrix to be sharded.
+         sharded_dim: the dimension which we shard on.
+         gpu_num: number of ranks.
+         spec: sharding spec.
+         rank: rank of the CUDA process.
+
+     Returns:
+         start_pos: start position of the sharded weight on the given rank.
+         chunk_size: chunk size of the sharded weight on the given rank.
+     """
+     sharding_dim_size = local_weight.size(sharded_dim)
+     split_size = get_split_size(sharding_dim_size, gpu_num)
+     current_offsets = 0
+     start_pos = current_offsets
+     for idx, placement in enumerate(spec.placements):
+         chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
+         if rank == placement.rank():
+             start_pos = current_offsets
+             break
+         current_offsets += chunk_size
+     return start_pos, chunk_size
+
+
+ def clone_module_parameter(module, param_name):
+     """
+     Clone a parameter from a given existing module.
+
+     Args:
+         module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned.
+         param_name (str): Name of the parameter of ``module`` that needs to be cloned.
+
+     Returns: cloned tensor as :class:`torch.nn.Parameter`.
+     """
+     tensor = getattr(module, param_name)
+     return torch.nn.Parameter(tensor.detach().clone())
+
+ def gen_binary_op_func(python_op, inplace=False):
+     src_lines = ['def f(lhs, rhs):']
+     if "torch" in python_op:
+         src_lines.append(f'    return {python_op}(lhs, rhs)\n')
+     elif inplace:
+         src_lines.append(f'    lhs {python_op}= rhs\n    return lhs\n')
+     else:
+         src_lines.append(f'    return lhs {python_op} rhs\n')
+
+     code_str = '\n'.join(src_lines)
+     g = {'torch': torch}
+     builtins.exec(code_str, g)
+     return g["f"]
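A quick, hypothetical demonstration of gen_binary_op_func; the tensors and operators below are arbitrary and not part of the commit.

import torch
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
    gen_binary_op_func,
)

add = gen_binary_op_func("+")                 # f(lhs, rhs) -> lhs + rhs
iadd = gen_binary_op_func("+", inplace=True)  # f(lhs, rhs) -> lhs += rhs; return lhs
tadd = gen_binary_op_func("torch.add")        # f(lhs, rhs) -> torch.add(lhs, rhs)

a, b = torch.ones(2, 2), torch.full((2, 2), 2.0)
assert torch.equal(add(a, b), tadd(a, b))      # both compute a + b
assert torch.equal(iadd(a.clone(), b), a + b)  # the in-place variant mutates lhs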
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py ADDED
@@ -0,0 +1,66 @@
+ # mypy: allow-untyped-defs
+
+ import copy
+ import random
+ import torch
+ from torch.distributed._shard import sharded_tensor
+
+ from torch.distributed._shard.sharding_spec import (
+     ChunkShardingSpec,
+ )
+
+ PLACEMENTS = [
+     "rank:0/cuda:0",
+     "rank:1/cuda:1",
+     "rank:2/cuda:2",
+     "rank:3/cuda:3",
+ ]
+
+ DEFAULT_GPU_NUM = 4
+
+
+ def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
+     spec_list = []
+     for i in range(len(sharding_dims)):
+         random.Random(seed + i).shuffle(PLACEMENTS)
+         spec_list.append(
+             ChunkShardingSpec(
+                 dim=sharding_dims[i],
+                 placements=copy.deepcopy(PLACEMENTS),
+             )
+         )
+     return spec_list
+
+ class MyShardedModel2(torch.nn.Module):
+     def __init__(
+         self,
+         spec=None,
+         group=None,
+         init_rrefs=True
+     ) -> None:
+         super().__init__()
+         if spec is not None:
+             self.sharded_tensor2 = sharded_tensor.rand(
+                 spec, 10, 20, process_group=group, init_rrefs=init_rrefs
+             )
+         else:
+             self.sharded_tensor2 = None
+         self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
+
+
+ class MyShardedModel1(torch.nn.Module):
+     def __init__(
+         self,
+         spec=None,
+         group=None,
+         init_rrefs=True
+     ) -> None:
+         super().__init__()
+         if spec is not None:
+             self.sharded_tensor1 = sharded_tensor.rand(
+                 spec, 10, 20, process_group=group, init_rrefs=init_rrefs
+             )
+         else:
+             self.sharded_tensor1 = None
+         self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
+         self.submodule = MyShardedModel2(spec, group, init_rrefs)
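Illustrative only (assumes it runs inside a ShardedTensorTestBase test with comms already initialized): building shuffled chunk-sharding specs and a toy sharded model with the helpers above.

from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
    MyShardedModel1,
    _chunk_sharding_specs_list_for_test,
)

specs = _chunk_sharding_specs_list_for_test([0, 0], seed=6)  # two dim-0 specs, placements shuffled
model = MyShardedModel1(spec=specs[0], init_rrefs=False)     # owns a 10x20 ShardedTensor plus a dense parameter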
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py ADDED
@@ -0,0 +1,42 @@
+ # mypy: allow-untyped-defs
+
+ import torch
+ import torch.nn as nn
+
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+
+ class SimpleMegatronLM(nn.Module):
+     def __init__(self, linear_size, rank=None, dtype=torch.float32):
+         super().__init__()
+         self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
+         self.gelu = nn.GELU()
+         self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
+         if rank is not None:
+             self.fc1.cuda(rank)
+             self.fc2.cuda(rank)
+
+     def forward(self, inp):
+         return self.fc2(self.gelu(self.fc1(inp)))
+
+     def get_weights(self):
+         if isinstance(self.fc1.weight, ShardedTensor):
+             weight1 = self.fc1.weight.local_tensor()
+         else:
+             weight1 = self.fc1.weight
+
+         if isinstance(self.fc2.weight, ShardedTensor):
+             weight2 = self.fc2.weight.local_tensor()
+         else:
+             weight2 = self.fc2.weight
+
+         return (weight1, weight2)
+
+     def get_biases(self):
+         return (self.fc1.bias, self.fc2.bias)
+
+     def get_weight_grads(self):
+         return (self.fc1.weight.grad, self.fc2.weight.grad)
+
+     def get_bias_grads(self):
+         return (self.fc1.bias.grad, self.fc2.bias.grad)
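A small usage sketch for SimpleMegatronLM; the layer sizes are made up for illustration.

import torch
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM

# linear_size is a pair of (in_features, out_features) tuples for fc1 and fc2.
model = SimpleMegatronLM([(16, 32), (32, 8)])
out = model(torch.randn(4, 16))
print(out.shape)                 # torch.Size([4, 8])
weights = model.get_weights()    # plain tensors unless the linears were sharded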
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py ADDED
@@ -0,0 +1,548 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates
4
+
5
+ import itertools
6
+ import sys
7
+ from dataclasses import dataclass
8
+ from functools import wraps
9
+ from typing import Any, Callable, cast, Dict, Iterator, List, Sequence, Tuple, TypeVar
10
+
11
+ import torch
12
+ import torch.distributed as dist
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+
16
+ from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
17
+ from torch.distributed._tensor.placement_types import Placement
18
+ from torch.distributed.tensor.parallel import (
19
+ ColwiseParallel,
20
+ parallelize_module,
21
+ PrepareModuleInput,
22
+ RowwiseParallel,
23
+ SequenceParallel,
24
+ )
25
+ from torch.testing._internal.common_distributed import (
26
+ MultiProcessTestCase,
27
+ MultiThreadedTestCase,
28
+ skip_if_lt_x_gpu,
29
+ run_subtests,
30
+ TEST_SKIPS,
31
+ )
32
+
33
+ from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
34
+
35
+ DEVICE_TYPE = (
36
+ "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu"
37
+ )
38
+
39
+ NUM_DEVICES = 4
40
+
41
+ # We use this as a proxy for "multiple GPUs exist"
42
+ if torch.cuda.is_available() and torch.cuda.device_count() > 1:
43
+ # when we actually have multiple GPUs, relax the requirement to smaller counts.
44
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count())
45
+
46
+ T = TypeVar("T")
47
+
48
+
49
+ # simple RMSNorm layer for testing
50
+ class RMSNormPython(torch.nn.Module):
51
+ def __init__(self, dim: int, eps: float = 1e-6):
52
+ super().__init__()
53
+ self.eps = eps
54
+ self.weight = torch.nn.Parameter(torch.ones(dim))
55
+
56
+ def _norm(self, x):
57
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
58
+
59
+ def forward(self, x):
60
+ output = self._norm(x)
61
+ return output * self.weight
62
+
63
+
64
+ class MLPModule(nn.Module):
65
+ def __init__(self, device, bias: bool = True):
66
+ super().__init__()
67
+ torch.manual_seed(5)
68
+ self.net1 = nn.Linear(10, 16, bias=bias, device=device)
69
+ self.relu = nn.ReLU()
70
+ self.net2 = nn.Linear(16, 10, bias=bias, device=device)
71
+
72
+ def forward(self, x):
73
+ return self.net2(self.relu(self.net1(x)))
74
+
75
+ def reset_parameters(self):
76
+ self.net1.reset_parameters()
77
+ self.net2.reset_parameters()
78
+
79
+
80
+ class MLPStacked(nn.Module):
81
+ def __init__(self, device, n_layers: int = 2):
82
+ super().__init__()
83
+ self.layers = nn.ModuleList([MLPModule(device) for i in range(n_layers)])
84
+
85
+ def forward(self, x):
86
+ for layer in self.layers:
87
+ x = layer(x)
88
+ return x
89
+
90
+
91
+ @dataclass
92
+ class ModelArgs:
93
+ n_layers: int = 2
94
+ vocab_size: int = 8
95
+ max_seq_len: int = 16
96
+ dim: int = 16
97
+ n_heads: int = 4
98
+ dropout_p: float = 0.1
99
+ use_attn_mask: bool = True
100
+ weight_tying: bool = True
101
+ checkpoint_activations: bool = False
102
+
103
+
104
+ class Attention(nn.Module):
105
+ def __init__(self, args: ModelArgs):
106
+ super().__init__()
107
+ assert args.dim % args.n_heads == 0
108
+ self.head_dim = args.dim // args.n_heads
109
+ self.n_heads = args.n_heads
110
+ self.dropout_p = args.dropout_p
111
+ self.resid_dropout = nn.Dropout(args.dropout_p)
112
+ self.use_attn_mask = args.use_attn_mask
113
+
114
+ self.wq = nn.Linear(args.dim, args.dim, bias=False)
115
+ self.wk = nn.Linear(args.dim, args.dim, bias=False)
116
+ self.wv = nn.Linear(args.dim, args.dim, bias=False)
117
+ self.wo = nn.Linear(args.dim, args.dim, bias=False)
118
+
119
+ def forward(self, x):
120
+ bsz, seq_len, _ = x.size()
121
+ queries, keys, values = self.wq(x), self.wk(x), self.wv(x)
122
+ queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim)
123
+ keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim)
124
+ values = values.view(bsz, seq_len, self.n_heads, self.head_dim)
125
+
126
+ queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
127
+ keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
128
+ values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim)
129
+
130
+ output = F.scaled_dot_product_attention(
131
+ queries,
132
+ keys,
133
+ values,
134
+ None,
135
+ self.dropout_p if self.training else 0,
136
+ self.use_attn_mask,
137
+ )
138
+ output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1)
139
+ return self.resid_dropout(self.wo(output))
140
+
141
+
142
+ class FeedForward(nn.Module):
143
+ def __init__(self, dim, hidden_dim, dropout_p):
144
+ super().__init__()
145
+ self.w1 = nn.Linear(dim, hidden_dim)
146
+ self.gelu = nn.GELU()
147
+ self.w2 = nn.Linear(hidden_dim, dim)
148
+ self.resid_dropout = nn.Dropout(dropout_p)
149
+
150
+ def forward(self, x):
151
+ return self.resid_dropout(self.w2(self.gelu(self.w1(x))))
152
+
153
+
154
+ class TransformerBlock(nn.Module):
155
+ def __init__(self, args: ModelArgs):
156
+ super().__init__()
157
+ self.attention_norm = nn.LayerNorm(args.dim)
158
+ self.attention = Attention(args)
159
+ self.ffn_norm = nn.LayerNorm(args.dim)
160
+ self.feed_forward = FeedForward(
161
+ args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p
162
+ )
163
+
164
+ def forward(self, x):
165
+ h = x + self.attention(self.attention_norm(x))
166
+ out = h + self.feed_forward(self.ffn_norm(h))
167
+ return out
168
+
169
+
170
+ # A toy transformer model, partly inspired by the nanoGPT model:
171
+ # https://github.com/karpathy/nanoGPT.
172
+ class Transformer(nn.Module):
173
+ def __init__(self, args: ModelArgs):
174
+ super().__init__()
175
+ assert args.vocab_size is not None
176
+ assert args.max_seq_len is not None
177
+ self.model_args = args
178
+ self.max_seq_len = args.max_seq_len
179
+ self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
180
+ self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim)
181
+ self.dropout = nn.Dropout(args.dropout_p)
182
+ self.layers = nn.ModuleList()
183
+ for _ in range(args.n_layers):
184
+ self.layers.append(TransformerBlock(args))
185
+ self.norm = nn.LayerNorm(args.dim)
186
+ self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
187
+ if args.weight_tying:
188
+ self.output.weight = self.tok_embeddings.weight
189
+ self.checkpoint_activations = args.checkpoint_activations
190
+
191
+ def forward(self, tokens):
192
+ _bsz, seq_len = tokens.size()
193
+ assert seq_len <= self.max_seq_len
194
+ h = self.tok_embeddings(tokens)
195
+ pos = torch.arange(0, seq_len, device=tokens.device)
196
+ p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim)
197
+ h = h + p
198
+ h = self.dropout(h)
199
+ for layer in self.layers:
200
+ if self.checkpoint_activations:
201
+ h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False)
202
+ else:
203
+ h = layer(h)
204
+ h = self.norm(h)
205
+ output = self.output(h).float()
206
+ return output
207
+
208
+ @staticmethod
209
+ def parallelize(
210
+ module: "Transformer", device_mesh: DeviceMesh, use_seq_parallel: bool, local_output_for_attn: bool = False
211
+ ) -> nn.Module:
212
+ assert isinstance(module, Transformer), f"Requires Transformer but got {module}"
213
+ # Parallelize the root submodules.
214
+ if use_seq_parallel:
215
+ root_plan = {
216
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)),
217
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)),
218
+ "norm": SequenceParallel(),
219
+ }
220
+ else:
221
+ root_plan = {
222
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
223
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
224
+ }
225
+
226
+ module_tp = parallelize_module(module, device_mesh, root_plan)
227
+ # Parallelize the attention and feed forward submodules.
228
+ for layer in module_tp.layers:
229
+ layer_parallelize_plan = {}
230
+ if use_seq_parallel:
231
+ layer_parallelize_plan["attention"] = PrepareModuleInput(
232
+ input_layouts=Shard(1),
233
+ desired_input_layouts=Replicate(),
234
+ )
235
+ # shard the RMSNorms
236
+ layer_parallelize_plan["attention_norm"] = SequenceParallel()
237
+ layer_parallelize_plan["ffn_norm"] = SequenceParallel()
238
+ layer_parallelize_plan["attention.wq"] = ColwiseParallel(use_local_output=local_output_for_attn)
239
+ layer_parallelize_plan["attention.wk"] = ColwiseParallel(use_local_output=local_output_for_attn)
240
+ layer_parallelize_plan["attention.wv"] = ColwiseParallel(use_local_output=local_output_for_attn)
241
+ layer_parallelize_plan["attention.wo"] = (
242
+ RowwiseParallel(output_layouts=Shard(1))
243
+ if use_seq_parallel
244
+ else RowwiseParallel()
245
+ )
246
+
247
+ layer_parallelize_plan["feed_forward.w1"] = (
248
+ ColwiseParallel(input_layouts=Shard(1))
249
+ if use_seq_parallel
250
+ else ColwiseParallel()
251
+ )
252
+ layer_parallelize_plan["feed_forward.w2"] = (
253
+ RowwiseParallel(output_layouts=Shard(1))
254
+ if use_seq_parallel
255
+ else RowwiseParallel()
256
+ )
257
+
258
+ parallelize_module(layer, device_mesh, layer_parallelize_plan)
259
+
260
+ # Parallelize the output submodule. If weight tying is enabled, we need to
261
+ # make sure output.weight is sharded consistently as tok_embeddings.weight,
262
+ # at the cost of the all_reduce operation using RowwiseParallel.
263
+ output_parallelize_plan = (
264
+ ColwiseParallel(
265
+ input_layouts=Shard(1),
266
+ output_layouts=Replicate(),
267
+ )
268
+ if use_seq_parallel
269
+ else ColwiseParallel(output_layouts=Replicate())
270
+ )
271
+ parallelize_module(module_tp.output, device_mesh, output_parallelize_plan)
272
+
273
+ if local_output_for_attn:
274
+ for layer in module_tp.layers:
275
+ layer.attention.n_heads = module_tp.model_args.n_heads // device_mesh.size()
276
+
277
+ # Manually set output.weight so that parameters and gradients are shared.
278
+ if module_tp.model_args.weight_tying:
279
+ module_tp.output.weight = module_tp.tok_embeddings.weight
280
+
281
+ return module_tp
282
+
283
+
284
+ def skip_unless_torch_gpu(method: T) -> T:
285
+ """
286
+ Test decorator which skips the test unless there's a GPU available to torch.
287
+
288
+ >>> # xdoctest: +SKIP
289
+ >>> @skip_unless_torch_gpu
290
+ >>> def test_some_method(self) -> None:
291
+ >>> ...
292
+ """
293
+ # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set.
294
+ return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method))
295
+
296
+
297
+ class DTensorTestBase(MultiProcessTestCase):
298
+ @property
299
+ def world_size(self) -> int:
300
+ return NUM_DEVICES
301
+
302
+ @property
303
+ def backend(self) -> str:
304
+ backend = "nccl" if self.device_type == "cuda" else "gloo"
305
+ return backend
306
+
307
+ def build_device_mesh(self) -> DeviceMesh:
308
+ return DeviceMesh(self.device_type, list(range(self.world_size)))
309
+
310
+ def init_pg(self) -> None:
311
+ if "nccl" in self.backend and torch.cuda.device_count() < self.world_size:
312
+ sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
313
+
314
+ if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]:
315
+ raise RuntimeError(f"Backend {self.backend} not supported!")
316
+
317
+ dist.init_process_group(
318
+ backend=self.backend,
319
+ world_size=self.world_size,
320
+ rank=self.rank, # pyre-ignore[16]
321
+ init_method=f"file://{self.file_name}", # pyre-ignore[16]
322
+ )
323
+
324
+ # set device for nccl pg for collectives
325
+ if "nccl" in self.backend:
326
+ torch.cuda.set_device(self.rank)
327
+
328
+ def destroy_pg(self) -> None:
329
+ # Wait for all ranks to reach here before starting shutdown.
330
+ # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895
331
+ # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu"))
332
+ # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs:
333
+ # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion
334
+ dist.barrier()
335
+ dist.destroy_process_group()
336
+
337
+ def setUp(self) -> None:
338
+ super().setUp()
339
+ self._spawn_processes()
340
+
341
+ # pyre-ignore[2]:
342
+ def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None:
343
+ out = op_call(*args, **kwargs)
344
+ dtc = DTensorConverter(mesh, args, kwargs)
345
+ for d_args, d_kwargs in dtc:
346
+ # pyre can't find assertTrue anymore?
347
+ self.assertEqual(dtc.successful(), True)
348
+ d_out = op_call(*d_args, **d_kwargs)
349
+ self.assertEqual(d_out.full_tensor(), out)
350
+
351
+ def run_subtests(self, *args, **kwargs):
352
+ return run_subtests(self, *args, **kwargs)
353
+
354
+
355
+ TestFunc = Callable[[object], object]
356
+
357
+
358
+ # wrapper to initialize comms (processgroup)
359
+ def with_comms(func: TestFunc) -> TestFunc:
360
+ assert func is not None
361
+
362
+ @wraps(func) # pyre-ignore[6]
363
+ def wrapper(
364
+ self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc]
365
+ ) -> None:
366
+ # if enough GPU we can use GPU, otherwise we fallback to CPU
367
+ if not torch.cuda.is_available() or torch.cuda.device_count() < self.world_size:
368
+ self.device_type = "cpu"
369
+ else:
370
+ self.device_type = DEVICE_TYPE
371
+
372
+ self.init_pg()
373
+
374
+ try:
375
+ func(self, *args, **kwargs) # type: ignore[misc]
376
+ except Exception as e:
377
+ dist.destroy_process_group()
378
+ raise e
379
+
380
+ self.destroy_pg()
381
+
382
+ return wrapper
383
+
384
+
385
+ class DTensorOpTestBase(MultiThreadedTestCase):
386
+ @property
387
+ def world_size(self) -> int:
388
+ return NUM_DEVICES
389
+
390
+ @property
391
+ def device_type(self) -> str:
392
+ return DEVICE_TYPE
393
+
394
+ def build_device_mesh(self):
395
+ return DeviceMesh(self.device_type, list(range(self.world_size)))
396
+
397
+ def setUp(self) -> None:
398
+ super().setUp()
399
+ self._spawn_threads()
400
+
401
+
402
+ # This is a class for converting args/kwargs of an op into distributed args/kwargs
403
+ class DTensorConverter:
404
+ def __init__(
405
+ self,
406
+ mesh: DeviceMesh,
407
+ args: Tuple[object, ...],
408
+ kwargs: Dict[str, object],
409
+ ) -> None:
410
+ self.hit = 0
411
+ self.miss = 0
412
+ self.mesh = mesh
413
+ self.args = args
414
+ self.kwargs = kwargs
415
+ flatten_args, flatten_args_spec = tree_flatten(args)
416
+ flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs)
417
+
418
+ self.flatten_args: List[object] = flatten_args
419
+ self.flatten_args_spec: TreeSpec = flatten_args_spec
420
+ self.flatten_kwargs: List[object] = flatten_kwargs
421
+ self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec
422
+
423
+ choices_for_args = []
424
+ for arg in self.flatten_args:
425
+ if isinstance(arg, torch.Tensor):
426
+ choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
427
+
428
+ for arg in self.flatten_kwargs:
429
+ if isinstance(arg, torch.Tensor):
430
+ choices_for_args.append(self.gen_sharding_choices_for_arg(arg))
431
+
432
+ self.sharding_combs: Iterator[Sequence[Placement]] = iter(
433
+ itertools.product(*choices_for_args)
434
+ )
435
+
436
+ def successful(self) -> bool:
437
+ return self.hit > 0 and self.miss == 0
438
+
439
+ def is_supported_tensor(self, t: torch.Tensor) -> bool:
440
+ # TODO: dist tensor need to support quantized and sparse
441
+ # tensors, quantized tensor might be relatively easy, but
442
+ # sparse tensor have special layouts that we need to possibly
443
+ # deal with, until we are clear about them, we don't officially
444
+ # support them.
445
+ return not any(
446
+ [
447
+ t.is_sparse_csr,
448
+ t.is_sparse,
449
+ t.is_mkldnn,
450
+ t.is_quantized,
451
+ t.is_nested,
452
+ torch._is_functional_tensor(t),
453
+ t.is_neg(),
454
+ t.is_conj(),
455
+ t.device.type in ("lazy", "meta"),
456
+ # We need a way to test if a tensor is batched but there
457
+ # is no official APi to do it
458
+ # torch._C._is_batched(t),
459
+ ]
460
+ )
461
+
462
+ def gen_sharding_choices_for_arg(self, arg: torch.Tensor) -> Sequence[Placement]:
463
+ mesh_size = self.mesh.size()
464
+ sharding_choices: List[Placement] = [Replicate()]
465
+ # c10d collective does not support bool tensor
466
+ # for bool tensor we treat it as replicated
467
+ if arg.dtype != torch.bool:
468
+ # only generating choices with: replicate, or sharding
469
+ # evenly on a dimension that could be sharded
470
+ sharding_choices = sharding_choices + [
471
+ Shard(i)
472
+ for i, s in enumerate(arg.shape)
473
+ if s > 1 and s % mesh_size == 0
474
+ ]
475
+ # TODO: add multi mesh choices
476
+ # all_choices = itertools.product(
477
+ # *(self.mesh.ndim * [sharding_choices])
478
+ # )
479
+ return sharding_choices
480
+
481
+ def __iter__(self) -> "DTensorConverter":
482
+ return self
483
+
484
+ def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]:
485
+ try:
486
+ next_sharding_choices = next(self.sharding_combs)
487
+ idx = 0
488
+
489
+ new_args: List[object] = []
490
+ for arg in self.flatten_args:
491
+ if isinstance(arg, torch.Tensor):
492
+ new_args.append(
493
+ self.to_dist_tensor(
494
+ arg, self.mesh, [next_sharding_choices[idx]]
495
+ )
496
+ )
497
+ idx += 1
498
+ else:
499
+ new_args.append(arg)
500
+
501
+ new_kwargs: List[object] = []
502
+ for arg in self.flatten_kwargs:
503
+ if isinstance(arg, torch.Tensor):
504
+ new_kwargs.append(
505
+ self.to_dist_tensor(
506
+ arg, self.mesh, [next_sharding_choices[idx]]
507
+ )
508
+ )
509
+ idx += 1
510
+ else:
511
+ new_kwargs.append(arg)
512
+
513
+ return (
514
+ tree_unflatten(new_args, self.flatten_args_spec),
515
+ tree_unflatten(new_kwargs, self.flatten_kwargs_spec),
516
+ )
517
+ except StopIteration as e:
518
+ raise StopIteration from e
519
+
520
+ def to_dist_tensor(
521
+ self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement]
522
+ ) -> torch.Tensor:
523
+ if type(t) is torch.Tensor or type(t) is nn.Parameter:
524
+ if self.is_supported_tensor(t):
525
+ self.hit += 1
526
+ if t.ndim == 0:
527
+ # scalar tensor by default will be replicated
528
+ r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim)
529
+ else:
530
+ # distribute non-scalar tensors
531
+ r = distribute_tensor(t, mesh, placements)
532
+ if type(t) is nn.Parameter:
533
+ r = nn.Parameter( # type: ignore[assignment]
534
+ r, requires_grad=r.requires_grad
535
+ )
536
+ return r
537
+ else:
538
+ self.miss += 1
539
+ return t
540
+ elif torch.overrides.is_tensor_like(t):
541
+ # Blindly converting tensor subclasses to dist tensor can cause
542
+ # unpredictable problems, we explicitly disable this conversion
543
+ # for now (i.e. we don't support DTensor holding tensor subclass
544
+ # until there's a strong reason later).
545
+ self.miss += 1
546
+ return t
547
+ else:
548
+ raise RuntimeError(f"Trying to convert to DTensor, but got {type(t)}")
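Usage note (not from this commit): DTensorTestBase and its with_comms decorator are typically used as in this minimal sketch, assuming a PyTorch build where DTensor.full_tensor() is available; the test body is illustrative.

import torch
from torch.distributed._tensor import distribute_tensor, Shard
from torch.testing._internal.distributed._tensor.common_dtensor import (
    DTensorTestBase,
    with_comms,
)

class ExampleDTensorTest(DTensorTestBase):
    @with_comms
    def test_shard_and_gather(self):
        mesh = self.build_device_mesh()                   # 1-D mesh over all ranks
        local = torch.arange(16, dtype=torch.float32).reshape(4, 4)
        dt = distribute_tensor(local, mesh, [Shard(0)])   # shard rows across the mesh
        # full_tensor() all-gathers the shards back into the original tensor
        self.assertEqual(dt.full_tensor(), local)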
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py ADDED
@@ -0,0 +1,51 @@
+ # mypy: allow-untyped-defs
+
+ # Copyright (c) Meta Platforms, Inc. and affiliates
+
+ import os
+ import shutil
+ import tempfile
+ from functools import wraps
+ from typing import Any, Callable, Dict, Optional, Tuple
+
+ import torch.distributed as dist
+
+
+ def with_temp_dir(
+     func: Optional[Callable] = None,
+ ) -> Optional[Callable]:
+     """
+     Wrapper to initialize temp directory for distributed checkpoint.
+     """
+     assert func is not None
+
+     @wraps(func)
+     def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None:
+         if dist.is_initialized():
+             # Only create temp_dir when rank is 0
+             if dist.get_rank() == 0:
+                 temp_dir = tempfile.mkdtemp()
+                 print(f"Using temp directory: {temp_dir}")
+             else:
+                 temp_dir = ""
+             object_list = [temp_dir]
+
+             # Broadcast temp_dir to all the other ranks
+             os.sync()
+             dist.broadcast_object_list(object_list)
+             self.temp_dir = object_list[0]
+             os.sync()
+         else:
+             temp_dir = tempfile.mkdtemp()
+             print(f"No process group initialized, using temp directory: {temp_dir}")
+             self.temp_dir = temp_dir
+
+         try:
+             func(self, *args, **kwargs)
+         finally:
+             if dist.is_initialized() and dist.get_rank() == 0:
+                 shutil.rmtree(self.temp_dir, ignore_errors=True)
+             else:
+                 shutil.rmtree(self.temp_dir, ignore_errors=True)
+
+     return wrapper
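Illustrative combination of with_temp_dir with the DTensor test base above (the test class and body are assumptions, not part of the commit): every rank sees the same directory in self.temp_dir, and it is cleaned up after the wrapped test returns.

import os
from torch.testing._internal.distributed._tensor.common_dtensor import (
    DTensorTestBase,
    with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir

class ExampleCheckpointDirTest(DTensorTestBase):
    @with_comms
    @with_temp_dir
    def test_shared_temp_dir(self):
        # rank 0 created the directory; with_temp_dir broadcast it to all ranks
        self.assertTrue(os.path.isdir(self.temp_dir))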
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py ADDED
@@ -0,0 +1,122 @@
+ # mypy: allow-untyped-defs
+
+ # Owner(s): ["oncall: distributed"]
+
+ import copy
+ from itertools import chain
+ from typing import Any, Dict
+
+ import torch
+ import torch.nn as nn
+ from torch.distributed._sharded_tensor import ShardedTensor
+ from torch.distributed._state_dict_utils import _gather_state_dict
+ from torch.distributed._tensor import DTensor
+ from torch.distributed.checkpoint.state_dict import (
+     _PG,
+     _STATE,
+     set_state_dict,
+     StateDictOptions,
+ )
+
+
+ class VerifyStateDictMixin:
+     def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False):
+         if isinstance(dist_tensor, (DTensor, ShardedTensor)):
+             dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
+
+         if offload_to_cpu:
+             orig_tensor = orig_tensor.cpu()
+             dist_tensor = dist_tensor.cpu()
+         self.assertTrue(isinstance(dist_tensor, torch.Tensor))
+         self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
+
+     def _verify_msd(
+         self,
+         msd: Dict[str, Any],
+         dist_msd: Dict[str, Any],
+         options: StateDictOptions = StateDictOptions(),
+         offload_to_cpu=False,
+     ) -> None:
+         if not options.ignore_frozen_params:
+             self.assertEqual(len(msd), len(dist_msd))
+         for fqn, param in msd.items():
+             dist_param = dist_msd.get(fqn, None)
+             if not options.ignore_frozen_params:
+                 self.assertIsNotNone(dist_param, f"{fqn=}")
+                 try:
+                     self._compare_tensor(param, dist_param, offload_to_cpu)
+                 except AssertionError as e:
+                     raise AssertionError(
+                         f"{fqn} has mismatched value {param} {dist_param}"
+                     ) from e
+             elif dist_param is None:
+                 self.assertFalse(param.requires_grad, f"{fqn=}")
+
+     def _verify_osd(
+         self,
+         model: nn.Module,
+         optim: torch.optim.Optimizer,
+         osd: Dict[str, Any],
+         dist_osd: Dict[str, Any],
+     ) -> None:
+         params = list(chain.from_iterable(g["params"] for g in optim.param_groups))
+         param_pid_mapping = dict(zip(params, range(len(params))))
+         fqn_pid_mapping = {}
+         for fqn, param in model.named_parameters():
+             pid = param_pid_mapping[param]
+             fqn_pid_mapping[fqn] = pid
+             fqn_pid_mapping[pid] = fqn
+         # Check optimizer_state_dict state
+
+         self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE]))
+         for pid, states in osd[_STATE].items():
+             fqn = fqn_pid_mapping[pid]
+             dist_states = dist_osd[_STATE].get(fqn, None)
+             self.assertIsNotNone(dist_states, fqn)
+             self.assertEqual(len(states), len(dist_states))
+             for key, state in states.items():
+                 dist_state = states.get(key, None)
+                 self.assertIsNotNone(dist_state)
+                 self._compare_tensor(state, dist_state)
+
+         # Check optimizer_state_dict param_group
+         old_dist_osd_pg = dist_osd[_PG]
+         if len(osd[_PG]) != len(dist_osd[_PG]):
+             self.assertTrue(len(dist_osd[_PG]) > len(osd[_PG]))
+             new_pg = copy.deepcopy(dist_osd[_PG][0])
+             new_pg["params"] = []
+             for dist_group in dist_osd[_PG]:
+                 new_pg["params"].extend(dist_group["params"])
+             dist_osd[_PG] = [new_pg]
+
+         self.assertEqual(len(osd[_PG]), len(dist_osd[_PG]))
+         for group, dist_group in zip(osd[_PG], dist_osd[_PG]):
+             self.assertEqual(len(group), len(dist_group))
+             for key, value in group.items():
+                 # Below doesn't work because param_groups can have None
+                 # values.
+                 # dist_value = dist_group.get(key, None)
+                 # self.assertIsNotNone(dist_value, (dist_group, group))
+                 dist_value = dist_group[key]
+                 if key == "params":
+                     fqns = [fqn_pid_mapping[pid] for pid in value]
+                     self.assertEqual(sorted(fqns), sorted(dist_value))
+                 else:
+                     self.assertEqual(value, dist_value)
+         dist_osd[_PG] = old_dist_osd_pg
+
+     def _verify_osd_by_load(
+         self,
+         model: nn.Module,
+         optim: torch.optim.Optimizer,
+         new_optim: torch.optim.Optimizer,
+         dist_osd: Dict[str, Any],
+     ) -> None:
+         new_dist_osd = _gather_state_dict(dist_osd)
+         set_state_dict(
+             model,
+             optimizers=new_optim,
+             model_state_dict={},
+             optim_state_dict=new_dist_osd,
+         )
+         self.assertEqual(optim.state_dict(), new_optim.state_dict())
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py ADDED
@@ -0,0 +1,733 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import contextlib
4
+ import enum
5
+ import logging
6
+ import os
7
+ import threading
8
+ from typing import NamedTuple
9
+
10
+ import torch
11
+ import torch.distributed as dist
12
+ import torch.distributed.autograd as dist_autograd
13
+ import torch.nn as nn
14
+ from torch.distributed import rpc
15
+ from torch.distributed.nn import RemoteModule
16
+ from torch.nn.parallel import DistributedDataParallel
17
+ from torch.testing._internal.common_distributed import (
18
+ requires_gloo,
19
+ requires_nccl,
20
+ skip_if_lt_x_gpu,
21
+ skip_if_rocm_multiprocess,
22
+ )
23
+ from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init
24
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
25
+ RpcAgentTestFixture,
26
+ )
27
+
28
+
29
+ NUM_EM_ROW = 2
30
+ D_SPARSE = 3
31
+ D_DENSE = 2
32
+ D_HID = 3
33
+ D_OUT = 1
34
+ NUM_TRAINERS = 4
35
+ # Trainers + the master + the remote worker
36
+ WORLD_SIZE = NUM_TRAINERS + 2
37
+ TRAINER_RANKS = list(range(NUM_TRAINERS))
38
+ REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1
39
+ MASTER_RANK = REMOTE_WORKER_RANK + 1
40
+
41
+
42
+ class DdpMode(enum.Enum):
43
+ # Don't apply DDP
44
+ NONE = enum.auto()
45
+ # Apply DDP to the top level nn.Module
46
+ OUTSIDE = enum.auto()
47
+ # Embed DDP inside the top level nn.Module
48
+ INSIDE = enum.auto()
49
+
50
+
51
+ def init_logger():
52
+ logger = logging.getLogger(__name__)
53
+ level = logging.DEBUG if "debug" in os.environ else logging.INFO
54
+ logger.setLevel(level)
55
+ console = logging.StreamHandler()
56
+ formatter = logging.Formatter(
57
+ "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
58
+ )
59
+ console.setFormatter(formatter)
60
+ console.setLevel(level)
61
+ # add the handlers to the logger
62
+ logger.addHandler(console)
63
+ logger.propagate = False
64
+ return logger
65
+
66
+
67
+ gLogger = init_logger()
68
+
69
+
70
+ class FeatureSet(NamedTuple):
71
+ """ A feature set has 2 types of features"""
72
+
73
+ dense_features: torch.Tensor
74
+ sparse_features: torch.LongTensor
75
+ values: torch.Tensor
76
+
77
+
78
+ def _call_method(method, rref, *args, **kwargs):
79
+ return method(rref.local_value(), *args, **kwargs)
80
+
81
+
82
+ def _remote_method(method, rref, *args, **kwargs):
83
+ args_tup = tuple([method, rref] + list(args))
84
+ return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
85
+
86
+
87
+ def _remote_method_async(method, rref, *args, **kwargs):
88
+ args_tup = tuple([method, rref] + list(args))
89
+ return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs)
90
+
91
+
92
+ class RemoteEM(nn.Module):
93
+ def __init__(self, num_embeddings: int, embedding_dim: int):
94
+ gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim)
95
+ super().__init__()
96
+ init_em = [0.5] * embedding_dim
97
+ self.em = nn.EmbeddingBag(
98
+ num_embeddings,
99
+ embedding_dim,
100
+ _weight=torch.tensor([init_em] * num_embeddings),
101
+ )
102
+
103
+ def forward(self, input: torch.Tensor):
104
+ gLogger.debug("Running RemoteEM.forward() on: %s", input)
105
+ return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
106
+
107
+
108
+ # Return a linear module with predefined parameters.
109
+ def getLinear(d_in, d_out):
110
+ l = nn.Linear(d_in, d_out, bias=False)
111
+ w = torch.ones((d_out, d_in))
112
+ w[0][0] = -1
113
+ w.requires_grad_()
114
+ l.weight.data = w
115
+ return l
116
+
117
+
118
+ class RemoteNet(nn.Module):
119
+ def __init__(self, d_in: int, d_out: int):
120
+ gLogger.info("Initing RemoteNet with %s %s", d_in, d_out)
121
+ super().__init__()
122
+ self.fc = getLinear(d_in, d_out)
123
+ self.relu = nn.ReLU()
124
+
125
+ def forward(self, input: torch.Tensor):
126
+ gLogger.debug("Running RemoteNet.forward() on: %s", input)
127
+ return self.relu(self.fc(input))
128
+
129
+
130
+ class HybridModel(nn.Module):
131
+ def __init__(
132
+ self,
133
+ remote_em_rref: rpc.RRef,
134
+ remote_net_rref: rpc.RRef,
135
+ process_group_for_ddp: dist.ProcessGroup = None,
136
+ ):
137
+ super().__init__()
138
+ self.remote_em_rref = remote_em_rref
139
+ self.remote_net_rref = remote_net_rref
140
+ self.fc1 = getLinear(D_DENSE, D_DENSE)
141
+ self.fc2 = getLinear(D_HID, D_OUT)
142
+
143
+ self.non_ddp_params = tuple(self.fc1.parameters()) + tuple(
144
+ self.fc2.parameters()
145
+ )
146
+ self.ddp_params = ()
147
+
148
+ if process_group_for_ddp is not None:
149
+ self.non_ddp_params, self.ddp_params = (
150
+ tuple(self.fc1.parameters()),
151
+ tuple(self.fc2.parameters()),
152
+ )
153
+ gLogger.info("Use DDP for the second local net.")
154
+ self.fc2 = DistributedDataParallel(
155
+ self.fc2, check_reduction=True, process_group=process_group_for_ddp
156
+ )
157
+
158
+ gLogger.info(
159
+ "HybridModel has %s groups of parameters.", len(list(self.parameters()))
160
+ )
161
+
162
+ def forward(self, input: FeatureSet):
163
+ gLogger.debug("Running HybridModel.forward on %s", input)
164
+ sparse = _remote_method(
165
+ RemoteEM.forward, self.remote_em_rref, input.sparse_features
166
+ )
167
+ # The same size of mini batch.
168
+ assert sparse.shape[0] == input.dense_features.shape[0]
169
+ dense = self.fc1(input.dense_features)
170
+ x = torch.cat((dense, sparse), 1)
171
+ gLogger.debug("Concatenated feature: %s", x)
172
+ x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
173
+ return self.fc2(x)
174
+
175
+
176
+ class Trainer:
177
+ def __init__(
178
+ self,
179
+ remote_em_rref: rpc.RRef,
180
+ remote_net_rref: rpc.RRef,
181
+ ddp_mode: DdpMode,
182
+ rank: int,
183
+ ):
184
+ self.rank = rank
185
+ self.trainer_group = (
186
+ dist.new_group(TRAINER_RANKS)
187
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE)
188
+ else None
189
+ )
190
+ self.remote_em_rref = remote_em_rref
191
+ self.remote_net_rref = remote_net_rref
192
+ self.hybrid_module = HybridModel(
193
+ self.remote_em_rref,
194
+ self.remote_net_rref,
195
+ self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None,
196
+ )
197
+ self.ddp_params, self.non_ddp_params = (
198
+ self.hybrid_module.ddp_params,
199
+ self.hybrid_module.non_ddp_params,
200
+ )
201
+ if ddp_mode == DdpMode.OUTSIDE:
202
+ gLogger.info("Wrapping the whole hybrid module into DDP.")
203
+ self.ddp_params += self.non_ddp_params
204
+ self.non_ddp_params = ()
205
+ self.hybrid_module = DistributedDataParallel(
206
+ self.hybrid_module,
207
+ check_reduction=True,
208
+ process_group=self.trainer_group,
209
+ )
210
+ gLogger.info(
211
+ "Succeeded in creating a HybridModel instance with "
212
+ "%s ddp params and %s other local params.",
213
+ len(self.ddp_params), len(self.non_ddp_params)
214
+ )
215
+
216
+ def destroy_pg(self):
217
+ if self.trainer_group:
218
+ dist.destroy_process_group(self.trainer_group)
219
+
220
+ def train_batch(
221
+ self,
222
+ mini_batch: FeatureSet,
223
+ trainer_has_less_inputs: bool,
224
+ simulate_uneven_inputs: bool,
225
+ ):
226
+ grads_dict = None
227
+
228
+ if not simulate_uneven_inputs:
229
+ input_batches = [mini_batch]
230
+ else:
231
+ # Split into microbatches, and trim to simulate uneven inputs.
232
+ dense_features = mini_batch.dense_features
233
+ sparse_features = mini_batch.sparse_features
234
+ values = mini_batch.values
235
+
236
+ dense_microbatch = torch.split(dense_features, 2)
237
+ sparse_microbatch = torch.split(sparse_features, 2)
238
+ values_microbatch = torch.split(values, 2)
239
+ batches = []
240
+ for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch):
241
+ feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v)
242
+ batches.append(feature_set)
243
+
244
+ if trainer_has_less_inputs:
245
+ input_batches = batches[: len(batches) // 2]
246
+ gLogger.info(
247
+ "Trainer reduced input patches from %s "
248
+ "to %s to simulate uneven inputs.",
249
+ len(batches), len(input_batches)
250
+ )
251
+ else:
252
+ input_batches = batches
253
+
254
+ with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext():
255
+ for b in input_batches:
256
+ with dist_autograd.context() as context_id:
257
+ output = self.hybrid_module.forward(b)
258
+ loss = (output * mini_batch.values).sum()
259
+ dist_autograd.backward(context_id, [loss])
260
+ grads_dict = dist_autograd.get_gradients(context_id)
261
+ gLogger.info(
262
+ "Loss is %s for mini batch: %s. "
263
+ "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict
264
+ )
265
+ return (
266
+ tuple(grads_dict[param] for param in self.ddp_params),
267
+ tuple(grads_dict[param] for param in self.non_ddp_params),
268
+ )
269
+
270
+
271
+ def get_training_examples():
272
+ n = 16
273
+ training_examples = FeatureSet(
274
+ dense_features=torch.zeros((n, D_DENSE)),
275
+ sparse_features=torch.zeros(n, dtype=torch.long),
276
+ values=torch.zeros(n),
277
+ )
278
+ idx = 0
279
+ # Every example has another one that has exactly the same features but an
280
+ # opposite value. Therefore, their grads cancel each other in all-reduce.
281
+ for value in (-1, 1):
282
+ for x in (-1.0 * value, 1.0 * value):
283
+ for y in (1.0 * value, -1.0 * value):
284
+ for z in (0, 1):
285
+ training_examples.dense_features[idx, :] = torch.tensor((x, y))
286
+ training_examples.sparse_features[idx] = z
287
+ training_examples.values[idx] = value
288
+ idx += 1
289
+
290
+ # Split the examples among NUM_TRAINERS trainers
291
+ assert 0 == (n % NUM_TRAINERS)
292
+ examples_per_trainer = int(n / NUM_TRAINERS)
293
+ return [
294
+ FeatureSet(
295
+ dense_features=training_examples.dense_features[
296
+ start : start + examples_per_trainer, :
297
+ ],
298
+ sparse_features=training_examples.sparse_features[
299
+ start : start + examples_per_trainer
300
+ ],
301
+ values=training_examples.values[start : start + examples_per_trainer],
302
+ )
303
+ for start in range(0, n, examples_per_trainer)
304
+ ]
305
+
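The comment in get_training_examples above relies on every (x, y, z) feature combination being generated once with value +1 and once with value -1, which is what makes the DDP gradients cancel in the all-reduce. A small standalone sketch (not part of the diff) that checks this pairing property of the nested loops:

    from collections import Counter

    def check_examples_cancel():
        # Reproduce the nested loops from get_training_examples and sum the
        # values seen for each (x, y, z) feature combination.
        totals = Counter()
        for value in (-1, 1):
            for x in (-1.0 * value, 1.0 * value):
                for y in (1.0 * value, -1.0 * value):
                    for z in (0, 1):
                        totals[(x, y, z)] += value
        # Every feature triple is hit with value +1 and value -1 exactly once,
        # so the summed value (and hence the summed gradient) is zero.
        assert all(total == 0 for total in totals.values())

    check_examples_cancel()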
306
+
307
+ shutdown_signal = threading.Condition()
308
+
309
+
310
+ def set_shutdown_signal():
311
+ global shutdown_signal
312
+ with shutdown_signal:
313
+ shutdown_signal.notify()
314
+
315
+
316
+ class DdpUnderDistAutogradTest(RpcAgentTestFixture):
317
+ @property
318
+ def world_size(self) -> int:
319
+ return WORLD_SIZE
320
+
321
+ def remote_worker_name(self) -> str:
322
+ # The name has to be consistent with that in 'dist_init' decorator.
323
+ return f"worker{REMOTE_WORKER_RANK}"
324
+
325
+ def trainer_name(self, rank):
326
+ # The name has to be consistent with that in 'dist_init' decorator.
327
+ return f"worker{rank}"
328
+
329
+ def _remote_worker_process(self, ddp_mode):
330
+ gLogger.info("The remote worker is running.")
331
+ dist.init_process_group(
332
+ backend="gloo",
333
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
334
+ world_size=self.world_size,
335
+ rank=self.rank,
336
+ )
337
+
338
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
339
+ # new_group needs to be called on all ranks.
340
+ dist.new_group(TRAINER_RANKS)
341
+
342
+ global shutdown_signal
343
+ with shutdown_signal:
344
+ shutdown_signal.wait()
345
+ gLogger.info("Exiting remote worker.")
346
+ dist.destroy_process_group()
347
+
348
+ def _trainer_process(self, rank: int):
349
+ gLogger.info("Running the trainer #%s...", rank)
350
+ gLogger.info(
351
+ "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS
352
+ )
353
+ dist.init_process_group(
354
+ backend="gloo",
355
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
356
+ world_size=self.world_size,
357
+ rank=self.rank,
358
+ )
359
+
360
+ gLogger.info("Waiting for shutdown signal on trainer #%s...", rank)
361
+
362
+ global shutdown_signal
363
+ with shutdown_signal:
364
+ shutdown_signal.wait()
365
+ gLogger.info("Exiting the trainer #%s...", rank)
366
+ dist.destroy_process_group()
367
+
368
+ def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
369
+ gLogger.info("Running the master process...")
370
+ dist.init_process_group(
371
+ backend="gloo",
372
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
373
+ world_size=self.world_size,
374
+ rank=self.rank,
375
+ )
376
+
377
+ remote_em_rref = rpc.remote(
378
+ self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
379
+ )
380
+ remote_net_rref = rpc.remote(
381
+ self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID)
382
+ )
383
+ gLogger.info("Created remote rrefs on master")
384
+ self.do_test_on_master(
385
+ ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref
386
+ )
387
+
388
+ def do_test_on_master(
389
+ self,
390
+ ddp_mode: DdpMode,
391
+ simulate_uneven_inputs: bool,
392
+ remote_em_rref: rpc.RRef,
393
+ remote_net_rref: rpc.RRef,
394
+ ):
395
+ if simulate_uneven_inputs:
396
+ gLogger.info(
397
+ "Running DDP + RPC test with simulating uneven inputs across trainers."
398
+ )
399
+
400
+ trainer_rrefs = []
401
+ for rank in TRAINER_RANKS:
402
+ trainer = self.trainer_name(rank)
403
+ trainer_rrefs.append(
404
+ rpc.remote(
405
+ trainer,
406
+ Trainer,
407
+ args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
408
+ )
409
+ )
410
+
411
+ if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
412
+ # new_group needs to be called on all ranks.
413
+ dist.new_group(TRAINER_RANKS)
414
+
415
+ training_examples = get_training_examples()
416
+ for _ in range(3):
417
+ futures = []
418
+ num_trainers = len(trainer_rrefs)
419
+ for idx, trainer_rref in enumerate(trainer_rrefs):
420
+ # Half the trainers will deplete inputs earlier than the rest.
421
+ trainer_has_less_inputs = (
422
+ simulate_uneven_inputs and idx < num_trainers // 2
423
+ )
424
+ futures.append(
425
+ _remote_method_async(
426
+ Trainer.train_batch,
427
+ trainer_rref,
428
+ training_examples[idx],
429
+ trainer_has_less_inputs,
430
+ simulate_uneven_inputs,
431
+ )
432
+ )
433
+
434
+ for future in futures:
435
+ ddp_grads, non_ddp_grads = future.wait()
436
+ # When there are uneven inputs, it is not necessary that grads
437
+ # cancel each other out, since some trainers contribute 0 grad.
438
+ if not simulate_uneven_inputs:
439
+ for grad in ddp_grads:
440
+ self.assertEqual(
441
+ grad,
442
+ torch.zeros_like(grad),
443
+ msg=f"The grad for any ddp parameter should be zeros, because "
444
+ "the training examples' grads cancel each other. Received "
445
+ f"gradient {grad}",
446
+ )
447
+ for grad in non_ddp_grads:
448
+ self.assertNotEqual(
449
+ grad,
450
+ torch.zeros_like(grad),
451
+ msg="The grad for any non-ddp parameter shouldn't be zeros",
452
+ )
453
+
454
+ # Destroy process groups
455
+ for idx, trainer_rref in enumerate(trainer_rrefs):
456
+ _remote_method_async(Trainer.destroy_pg, trainer_rref).wait()
457
+
458
+ # Send shutdown signals.
459
+ for rank in TRAINER_RANKS:
460
+ trainer = self.trainer_name(rank)
461
+ rpc.rpc_sync(trainer, set_shutdown_signal, args=())
462
+
463
+ rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
464
+
465
+ def _do_test(self, ddp_mode, simulate_uneven_inputs=False):
466
+ if self.rank == MASTER_RANK:
467
+ self._master_process(ddp_mode, simulate_uneven_inputs)
468
+ elif self.rank == REMOTE_WORKER_RANK:
469
+ self._remote_worker_process(ddp_mode)
470
+ elif self.rank in TRAINER_RANKS:
471
+ self._trainer_process(self.rank)
472
+ else:
473
+ raise RuntimeError(f"Unknown process rank: {self.rank}")
474
+
475
+ @requires_gloo()
476
+ @dist_init
477
+ def test_backward_no_ddp(self):
478
+ self._do_test(DdpMode.NONE)
479
+
480
+ @requires_gloo()
481
+ @dist_init
482
+ def test_backward_ddp_outside(self):
483
+ self._do_test(DdpMode.OUTSIDE)
484
+
485
+ @requires_gloo()
486
+ @dist_init
487
+ def test_backward_ddp_outside_uneven_inputs(self):
488
+ self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True)
489
+
490
+ @requires_gloo()
491
+ @dist_init
492
+ def test_backward_ddp_inside(self):
493
+ self._do_test(DdpMode.INSIDE)
494
+
495
+
496
+ # Common utils for both CPU and CUDA test suites
497
+ class CommonDdpComparisonTest(RpcAgentTestFixture):
498
+ @property
499
+ def world_size(self) -> int:
500
+ return NUM_TRAINERS
501
+
502
+ def trainer_name(self, rank):
503
+ # The name has to be consistent with that in 'dist_init' decorator.
504
+ return f"worker{rank}"
505
+
506
+ @staticmethod
507
+ def get_remote_grads(rref, context_id):
508
+ return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
509
+
510
+
511
+ class DdpComparisonTest(CommonDdpComparisonTest):
512
+ def _run_test_ddp_comparision(self, simulate_uneven_inputs=False):
513
+ gLogger.info("Running trainer rank: %s", self.rank)
514
+ # Each trainer uses a different random seed. Otherwise, they are going
515
+ # to have exactly the same initial model parameters, input, and
516
+ # therefore grads. That means the grads will be the same before and
517
+ # after DDP's all-reduce.
518
+ torch.manual_seed(self.rank)
519
+ dist.init_process_group(
520
+ backend="gloo",
521
+ # Postfix file_name with "pg" since file_name is also used by RPC agent
522
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"),
523
+ world_size=self.world_size,
524
+ rank=self.rank,
525
+ )
526
+ net = nn.Linear(2, 3)
527
+ ddp_net = DistributedDataParallel(net)
528
+
529
+ # Odd ranks join early if simulate_uneven_inputs.
530
+ num_inputs = 1
531
+ if simulate_uneven_inputs:
532
+ if self.rank % 2 == 0:
533
+ num_inputs += 2
534
+ inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)]
535
+
536
+ if simulate_uneven_inputs:
537
+ gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list))
538
+
539
+ # Use distributed autograd. The gradients will be in RPC context map.
540
+ grads_dict = {}
541
+ with ddp_net.join(simulate_uneven_inputs):
542
+ for i, inputs in enumerate(inputs_list):
543
+ with dist_autograd.context() as context_id:
544
+ loss = ddp_net(inputs).norm()
545
+ dist_autograd.backward(context_id, [loss])
546
+ grads_dict = dist_autograd.get_gradients(context_id)
547
+ gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict)
548
+
549
+ # Use local autograd. The gradients will be in each variable's '.grad'.
550
+ ddp_net.zero_grad()
551
+ loss = ddp_net(inputs).norm()
552
+ loss.backward()
553
+
554
+ # The gradients should be the same
555
+ for param in net.parameters():
556
+ self.assertTrue(
557
+ param in grads_dict,
558
+ msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}",
559
+ )
560
+ self.assertEqual(
561
+ grads_dict[param],
562
+ param.grad,
563
+ msg=f"The grads for param {param} are different under local "
564
+ f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}",
565
+ )
566
+ dist.destroy_process_group()
567
+
568
+ @requires_gloo()
569
+ @dist_init
570
+ def test_ddp_comparison(self):
571
+ self._run_test_ddp_comparision()
572
+
573
+ @requires_gloo()
574
+ @dist_init
575
+ def test_ddp_comparison_uneven_inputs(self):
576
+ # test with simulating uneven inputs in DDP
577
+ self._run_test_ddp_comparision(simulate_uneven_inputs=True)
578
+
579
+ @requires_gloo()
580
+ @dist_init
581
+ def test_ddp_dist_autograd_sparse_grads(self):
582
+ # Each trainer uses a different random seed. Otherwise, they are going
583
+ # to have exactly the same initial model parameters, input, and
584
+ # therefore grads. That means the grads will be the same before and
585
+ # after DDP's all-reduce.
586
+ torch.manual_seed(self.rank)
587
+ dist.init_process_group(
588
+ backend="gloo",
589
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
590
+ world_size=self.world_size,
591
+ rank=self.rank,
592
+ )
593
+
594
+ model = nn.EmbeddingBag(10, 3, sparse=True)
595
+ ddp_model = DistributedDataParallel(model)
596
+
597
+ # Different inputs for each trainer
598
+ input = torch.LongTensor(10).random_(0, 10)
599
+ offsets = torch.LongTensor([0, 4])
600
+
601
+ # Run local.
602
+ loss = ddp_model(input, offsets).sum()
603
+ loss.backward()
604
+
605
+ with dist_autograd.context() as context_id:
606
+ loss = ddp_model(input, offsets).sum()
607
+ dist_autograd.backward(context_id, [loss])
608
+ grads_dict = dist_autograd.get_gradients(context_id)
609
+ self.assertEqual(1, len(grads_dict))
610
+ self.assertEqual(model.weight.grad, grads_dict[model.weight])
611
+
612
+ @requires_gloo()
613
+ @dist_init
614
+ def test_ddp_dist_autograd_local_vs_remote(self):
615
+ # Each trainer uses a different random seed. Otherwise, they are going
616
+ # to have exactly the same initial model parameters, input, and
617
+ # therefore grads. That means the grads will be the same before and
618
+ # after DDP's all-reduce.
619
+ torch.manual_seed(self.rank)
620
+ dist.init_process_group(
621
+ backend="gloo",
622
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
623
+ world_size=self.world_size,
624
+ rank=self.rank,
625
+ )
626
+
627
+ # Use two different remote device input string, w/ and w/o the default
628
+ # device string "cpu", respectively.
629
+ for remote_device in ["worker0/cpu", "worker0"]:
630
+ remote_layer1 = RemoteModule(
631
+ remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False)
632
+ )
633
+ layer1 = nn.Linear(10, 5, False)
634
+ # Start with the same parameters for remote and local
635
+ layer1.weight = remote_layer1.module_rref.to_here().weight
636
+
637
+ # Run local case.
638
+ layer2 = nn.Linear(5, 1)
639
+ inputs = torch.rand((10, 10))
640
+ ddp_model = DistributedDataParallel(layer2)
641
+ loss = ddp_model(layer1(inputs)).sum()
642
+ loss.backward()
643
+
644
+ # Run remote case.
645
+ with dist_autograd.context() as context_id:
646
+ loss = ddp_model(remote_layer1(inputs)).sum()
647
+ dist_autograd.backward(context_id, [loss])
648
+ grads_dict = dist_autograd.get_gradients(context_id)
649
+ dist.barrier()
650
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
651
+ self.assertEqual(
652
+ layer1.weight.grad,
653
+ rpc.rpc_sync(
654
+ "worker0",
655
+ CommonDdpComparisonTest.get_remote_grads,
656
+ args=(remote_layer1.module_rref, context_id),
657
+ ),
658
+ )
659
+
660
+
661
+ class CudaDdpComparisonTest(CommonDdpComparisonTest):
662
+ @skip_if_lt_x_gpu(NUM_TRAINERS)
663
+ @requires_nccl()
664
+ @dist_init
665
+ @skip_if_rocm_multiprocess
666
+ def test_ddp_dist_autograd_local_vs_remote_gpu(self):
667
+ # Each trainer uses a different random seed. Otherwise, they are going
668
+ # to have exactly the same initial model parameters, input, and
669
+ # therefore grads. That means the grads will be the same before and
670
+ # after DDP's all-reduce.
671
+ torch.manual_seed(self.rank)
672
+ dist.init_process_group(
673
+ backend="gloo",
674
+ init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
675
+ world_size=self.world_size,
676
+ rank=self.rank,
677
+ )
678
+
679
+ remote_layer1 = RemoteModule(
680
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False)
681
+ )
682
+ layer1 = nn.Linear(10, 7, False)
683
+ # Start with the same parameters for remote and local
684
+ layer1.weight = remote_layer1.module_rref.to_here().weight
685
+
686
+ layer2 = nn.Linear(7, 5).cuda(self.rank)
687
+ ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
688
+
689
+ remote_layer3 = RemoteModule(
690
+ remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False)
691
+ )
692
+ layer3 = nn.Linear(5, 3, False)
693
+ # Start with the same parameters for remote and local
694
+ layer3.weight = remote_layer3.module_rref.to_here().weight
695
+
696
+ layer4 = nn.Linear(3, 1).cuda(self.rank)
697
+ ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
698
+
699
+ # Run local case.
700
+ inputs = torch.rand((10, 10))
701
+ loss = ddp_layer4(
702
+ layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank)
703
+ ).sum()
704
+ loss.backward()
705
+
706
+ # Run remote case.
707
+ with dist_autograd.context() as context_id:
708
+ loss = ddp_layer4(
709
+ remote_layer3(
710
+ ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu()
711
+ ).cuda(self.rank)
712
+ ).sum()
713
+ dist_autograd.backward(context_id, [loss])
714
+ grads_dict = dist_autograd.get_gradients(context_id)
715
+ dist.barrier()
716
+ self.assertEqual(
717
+ layer1.weight.grad,
718
+ rpc.rpc_sync(
719
+ "worker0",
720
+ CommonDdpComparisonTest.get_remote_grads,
721
+ args=(remote_layer1.module_rref, context_id),
722
+ ),
723
+ )
724
+ self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
725
+ self.assertEqual(
726
+ layer3.weight.grad,
727
+ rpc.rpc_sync(
728
+ "worker0",
729
+ CommonDdpComparisonTest.get_remote_grads,
730
+ args=(remote_layer3.module_rref, context_id),
731
+ ),
732
+ )
733
+ self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight])
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py ADDED
@@ -0,0 +1,66 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ from contextlib import contextmanager
4
+ from datetime import timedelta
5
+ from functools import (
6
+ partial,
7
+ wraps,
8
+ )
9
+
10
+ import torch.distributed as dist
11
+ import torch.distributed.distributed_c10d as c10d
12
+
13
+ class MockProcessGroup(dist.ProcessGroup):
14
+
15
+ def __init__(self, rank, world):
16
+ super().__init__(rank, world)
17
+
18
+ def getBackendName(self):
19
+ return "mock_process_group"
20
+
21
+ def create_mock_pg(prefix_store, rank, world_size, timeout):
22
+ return MockProcessGroup(rank, world_size)
23
+
24
+ dist.Backend.register_backend('mock_process_group', create_mock_pg)
25
+
26
+ def mock_init_dist(rank, world_size):
27
+ # !!! WARNING !!!
28
+ # Kids don't try this at home, this is a cute pile of hacks that
29
+ # depends on a small mountain of c10d internals
30
+ assert not dist.is_initialized()
31
+ store = dist.HashStore()
32
+ # Trick _store_based_barrier into believing everyone else already checked-in
33
+ # Zero is the group index
34
+ store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
35
+ dist.init_process_group(
36
+ backend="mock_process_group",
37
+ rank=rank,
38
+ world_size=world_size,
39
+ store=store,
40
+ group_name="fake",
41
+ timeout=timedelta(seconds=1))
42
+
43
+ @contextmanager
44
+ def with_dist(rank=0, world_size=2):
45
+ """
46
+ Context manager that initializes c10d with a fake process group.
47
+ """
48
+ mock_init_dist(rank=rank, world_size=world_size)
49
+ try:
50
+ yield
51
+ finally:
52
+ dist.destroy_process_group()
53
+
54
+ def with_fake_comms(func=None, rank=0, world_size=2):
55
+ """
56
+ Function wrapper that inits a fake process group designed for testing.
57
+ Right now only querying for world size is available
58
+ """
59
+ if func is None:
60
+ return partial(with_fake_comms, rank=rank, world_size=world_size)
61
+
62
+ @wraps(func)
63
+ def wrapper(self, *args, **kwargs):
64
+ with with_dist(rank, world_size):
65
+ func(self, *args, **kwargs)
66
+ return wrapper
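The with_dist and with_fake_comms helpers above let a single-process test initialize c10d with the mock backend so that world-size queries work without real peers. A minimal usage sketch under that assumption (the test class and method names below are made up for illustration):

    import unittest

    import torch.distributed as dist
    from torch.testing._internal.distributed.distributed_utils import with_fake_comms

    class FakeCommsExample(unittest.TestCase):
        @with_fake_comms(rank=1, world_size=4)
        def test_world_size_query(self):
            # Inside the decorated test, c10d has been initialized with the
            # mock backend, so queries work without any other processes.
            self.assertTrue(dist.is_initialized())
            self.assertEqual(dist.get_world_size(), 4)

    if __name__ == "__main__":
        unittest.main()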
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py ADDED
@@ -0,0 +1,31 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed as dist
4
+
5
+ from torch._C._distributed_c10d import (
6
+ FakeProcessGroup,
7
+ )
8
+
9
+
10
+ class FakeStore(dist.Store):
11
+ """
12
+ A fake store is a fake key-value store used simply to initialize
13
+ a fake process group; one can use either FakeStore or HashStore.
14
+ """
15
+
16
+
17
+ def _create_fake_pg(prefix_store, rank, world_size, timeout):
18
+ """
19
+ A fake process group (not related to FakeTensor) is a process group which
20
+ doesn't actually do any communication, it just hallucinates some
21
+ communication. You can run a single rank with a fake process group
22
+ without needing multiple processes (simulates per-rank behavior)
23
+
24
+ NOTE: This is not a real process group, and it would produce wrong results
25
+ for every collective. It should be used as a convenient tool when experimenting
26
+ with distributed code without caring about the actual data.
27
+ """
28
+ return FakeProcessGroup(rank, world_size)
29
+
30
+
31
+ dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
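With the "fake" backend registered above, one process can be initialized as a single rank of an arbitrarily large job. A hedged sketch of the intended usage (as the NOTE warns, the collective below returns immediately and its numerical result is meaningless):

    import torch
    import torch.distributed as dist
    # Importing the module registers the "fake" backend and provides FakeStore.
    from torch.testing._internal.distributed.fake_pg import FakeStore

    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=8, store=store)

    t = torch.ones(4)
    dist.all_reduce(t)                # no real communication happens
    print(dist.get_world_size())      # 8, even though only one process exists

    dist.destroy_process_group()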
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py ADDED
@@ -0,0 +1,543 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import sys
4
+ import threading
5
+ from dataclasses import dataclass
6
+ from typing import Dict, List, Optional, Tuple, Union
7
+ from functools import partial, reduce
8
+
9
+ import torch
10
+ import torch.distributed as dist
11
+ import weakref
12
+ from torch._C._distributed_c10d import (
13
+ _create_work_from_future,
14
+ AllgatherOptions,
15
+ AllreduceOptions,
16
+ AllToAllOptions,
17
+ BarrierOptions,
18
+ BroadcastOptions,
19
+ ReduceScatterOptions,
20
+ ScatterOptions,
21
+ Store,
22
+ ReduceOp,
23
+ )
24
+ from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp
25
+ from torch.futures import Future
26
+ from torch.utils import _pytree as pytree
27
+
28
+ """
29
+ TODO:
30
+ Lots of missing collectives.
31
+ Collectives validation.
32
+ Make timeout robust by making collectives respect the test deadline.
33
+ Make tests robust by making collectives interruptible.
34
+ We need some synchronization around cleanup to ensure that timed-out ranks don't cause spurious failures.
35
+
36
+ """
37
+
38
+
39
+ def flatten_list(lst):
40
+ return pytree.tree_leaves(lst)
41
+
42
+
43
+ def ret_work(ret):
44
+ fut = Future()
45
+ fut.set_result(ret)
46
+ return _create_work_from_future(fut)
47
+
48
+ def binop_reduce(tensors, op):
49
+ res = op(torch.stack(tensors), dim=0)
50
+ if isinstance(res, torch.Tensor):
51
+ return res
52
+ # min/max return a namedtuple
53
+ return res.values
54
+
55
+ def bitwise_reduce(tensors, op):
56
+ return reduce(op, tensors)
57
+
58
+ _reduce_ops = {
59
+ ReduceOp.SUM: partial(binop_reduce, op=torch.sum),
60
+ ReduceOp.AVG: partial(binop_reduce, op=torch.mean),
61
+ ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod),
62
+ ReduceOp.MIN: partial(binop_reduce, op=torch.min),
63
+ ReduceOp.MAX: partial(binop_reduce, op=torch.max),
64
+ ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and),
65
+ ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or),
66
+ ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor),
67
+ }
68
+
69
+ class AllToAll:
70
+ @torch.no_grad()
71
+ def work(self, data):
72
+ world_size = len(data)
73
+ for dest_rank in range(world_size):
74
+ output_tensor_list, _ = data[dest_rank]
75
+ for src_rank in range(world_size):
76
+ _, input_tensor_list = data[src_rank]
77
+ output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank])
78
+
79
+ class AllToAllBase:
80
+ @torch.no_grad()
81
+ def work(self, data):
82
+ world_size = len(data)
83
+ for dest_rank in range(world_size):
84
+ output_buffer, _, output_split_sizes, _ = data[dest_rank]
85
+
86
+ output_indexes = self._size_cumsum(output_buffer.size(0), output_split_sizes, world_size)
87
+
88
+ for src_rank in range(world_size):
89
+ _, input_buffer, _, input_split_sizes = data[src_rank]
90
+ input_indexes = self._size_cumsum(input_buffer.size(0), input_split_sizes, world_size)
91
+
92
+ output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_(
93
+ input_buffer[input_indexes[dest_rank]:input_indexes[dest_rank + 1]]
94
+ )
95
+
96
+ def _size_cumsum(self, buf_size: int, sizes: Union[torch.Tensor, List[int], None], world_size: int) -> torch.Tensor:
97
+ if sizes is None or len(sizes) == 0:
98
+ sizes = torch.full(
99
+ (world_size,), buf_size // world_size, dtype=torch.int64
100
+ )
101
+ if not isinstance(sizes, torch.Tensor):
102
+ sizes = torch.tensor(sizes, dtype=torch.int64)
103
+ assert sizes.dtype == torch.int64
104
+ sizes = torch.cumsum(
105
+ torch.cat(
106
+ (
107
+ torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes
108
+ ),
109
+ dim=0
110
+ ),
111
+ dim=0
112
+ )
113
+ return sizes
114
+
115
+ class AllReduce:
116
+ def __init__(self, op):
117
+ if op.op not in _reduce_ops:
118
+ raise NotImplementedError(
119
+ f"AllReduce op {op.op} not supported on multithreaded pg for now."
120
+ )
121
+ self.op = op.op
122
+
123
+ @torch.no_grad()
124
+ def work(self, data):
125
+ for i in range(len(data[0])):
126
+ tensors = []
127
+ # use rank0 as the device for sum
128
+ rank_0_device = data[0][i].device
129
+ # collect all data to the list and make them
130
+ # all on rank 0 device
131
+ for src_rank in range(0, len(data)):
132
+ tensors.append(data[src_rank][i].to(rank_0_device))
133
+
134
+ # now mimic reduce across all ranks
135
+ res = _reduce_ops[self.op](tensors)
136
+
137
+ # copy all the reduced value to each rank
138
+ for src_rank in range(len(data)):
139
+ data[src_rank][i].copy_(res.to(data[src_rank][i].device))
140
+
141
+
142
+ class AllGather:
143
+ @torch.no_grad()
144
+ def work(self, data):
145
+ for src_rank in range(len(data)):
146
+ in_tensor_list = data[src_rank][1]
147
+ # Can't handle all_gather with multiple tensors
148
+ assert len(in_tensor_list) == 1
149
+ src_tensor = in_tensor_list[0]
150
+
151
+ for dest in data:
152
+ dest_tensor = dest[0][0][src_rank]
153
+ dest_tensor.copy_(src_tensor)
154
+
155
+
156
+ class Scatter:
157
+ def __init__(self, src):
158
+ self.src = src
159
+
160
+ @torch.no_grad()
161
+ def work(self, data):
162
+ src_in_tensor_list = data[self.src][1]
163
+ # Can't handle scatter with multiple input tensor list
164
+ assert len(src_in_tensor_list) == 1
165
+ src_in_tensors = src_in_tensor_list[0]
166
+
167
+ for rank, each_rank_data in enumerate(data):
168
+ out_tensor_list = each_rank_data[0]
169
+ # Can't handle scatter with multiple output tensor
170
+ assert len(out_tensor_list) == 1
171
+ dest_tensor = out_tensor_list[0]
172
+ dest_tensor.copy_(src_in_tensors[rank])
173
+
174
+
175
+ class Gather:
176
+ def __init__(self, dst):
177
+ self.dst = dst
178
+
179
+ @torch.no_grad()
180
+ def work(self, data):
181
+ # Can't handle gather with multiple tensor lists
182
+ assert len(data[self.dst][0]) == 1
183
+ out_tensor_list = data[self.dst][0][0]
184
+ for rank, each_rank_data in enumerate(data):
185
+ src_in_tensor_list = each_rank_data[1]
186
+ # Can't handle gather with multiple tensor lists
187
+ assert len(src_in_tensor_list) == 1
188
+ dest_tensor = out_tensor_list[rank]
189
+ dest_tensor.copy_(src_in_tensor_list[0])
190
+
191
+ class ReduceScatter:
192
+ def __init__(self, op):
193
+ if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG:
194
+ raise NotImplementedError(f"ReduceScatter does not support {op}")
195
+ self.op = op
196
+
197
+ @torch.no_grad()
198
+ def work(self, data):
199
+ start_reduction = [False for _ in range(len(data))]
200
+ for each_rank_data in data:
201
+ # Can't handle reduce_scatter with multiple scatter list
202
+ assert len(each_rank_data[1]) == 1
203
+ to_scatter = each_rank_data[1][0]
204
+ for i in range(len(to_scatter)):
205
+ dest_tensor_on_rank_i = data[i][0]
206
+ # Can't handle reduce_scatter with multiple output tensor
207
+ assert len(dest_tensor_on_rank_i) == 1
208
+ dst_tensor_device = dest_tensor_on_rank_i[0].device
209
+ if not start_reduction[i]:
210
+ dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device))
211
+ start_reduction[i] = True
212
+ else:
213
+ dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device))
214
+ if self.op == dist.ReduceOp.AVG:
215
+ num_ranks = len(data)
216
+ for each_rank_data in data:
217
+ each_rank_data[0][0] /= num_ranks
218
+
219
+
220
+ class Broadcast:
221
+ def __init__(self, src):
222
+ self.src = src
223
+
224
+ @torch.no_grad()
225
+ def work(self, data):
226
+ in_tensor_list = flatten_list(data[self.src])
227
+ for i in range(len(data)):
228
+ out_tensor_list = flatten_list(data[i])
229
+ for j in range(len(in_tensor_list)):
230
+ out_tensor_list[j].copy_(in_tensor_list[j])
231
+
232
+
233
+ class Collective:
234
+ def __init__(self, world_size, collective, pg):
235
+ self._world_size = world_size
236
+ self._collective = collective
237
+
238
+ self._start_cond = threading.Condition()
239
+ self._done_cond = threading.Condition()
240
+
241
+ self._data = [None] * world_size
242
+ self._count = 0
243
+ self._done = False
244
+
245
+ self._pg = pg
246
+
247
+ def join(self, rank, data):
248
+ with self._start_cond:
249
+ self._data[rank] = data
250
+ self._count += 1
251
+
252
+ # notify rank 0
253
+ if self._count == self._world_size:
254
+ if rank > 0:
255
+ self._start_cond.notify()
256
+
257
+ if rank == 0:
258
+ self._start_cond.wait_for(
259
+ lambda: self._count == self._world_size or self._pg._terminate.is_set()
260
+ )
261
+ # SystemExit is a subclass of BaseException but not Exception,
262
+ # so it can be distinguished from normal exceptions raised by program errors
263
+ # and hidden from the exception queue.
264
+ if self._pg._terminate.is_set():
265
+ sys.exit("Test termination event occurs.")
266
+
267
+ with self._done_cond:
268
+ # wait for rank 0 to finish
269
+ if rank > 0:
270
+ self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set())
271
+ if self._pg._terminate.is_set():
272
+ sys.exit("Test termination event occurs.")
273
+ else:
274
+ # copy data around
275
+ self._collective.work(self._data)
276
+ self._done = True
277
+ self._done_cond.notify_all()
278
+ return ret_work(data)
279
+
280
+
281
+ class ProcessLocalGroup(dist.ProcessGroup):
282
+ _coll_lock = threading.Lock()
283
+ _cur_coll_on_pgs = {}
284
+
285
+ _terminate = threading.Event()
286
+
287
+ @classmethod
288
+ def _start_coll(cls, collective, pg):
289
+ with cls._coll_lock:
290
+ # pg_name is unique, we use that to record the mapping between pg and collective
291
+ if pg.pg_name not in cls._cur_coll_on_pgs:
292
+ cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls)
293
+ return cls._cur_coll_on_pgs[pg.pg_name]
294
+
295
+ @classmethod
296
+ def _end_coll(cls, collective, pg):
297
+ # This is racily called by all ranks, so only one will work
298
+ with cls._coll_lock:
299
+ if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective:
300
+ cls._cur_coll_on_pgs.pop(pg.pg_name)
301
+
302
+ @classmethod
303
+ def exception_handle(cls, exc):
304
+ cls._terminate.set()
305
+ for coll in cls._cur_coll_on_pgs.values():
306
+ with coll._start_cond:
307
+ coll._start_cond.notify()
308
+ with coll._done_cond:
309
+ coll._done_cond.notify_all()
310
+
311
+ @classmethod
312
+ def reset(cls):
313
+ with cls._coll_lock:
314
+ cls._cur_coll_on_pgs = {}
315
+ cls._terminate.clear()
316
+
317
+ def alltoall_base(
318
+ self,
319
+ output_buffer: torch.Tensor,
320
+ input_buffer: torch.Tensor,
321
+ output_split_sizes: Optional[List[int]],
322
+ input_split_sizes: Optional[List[int]],
323
+ opts=AllToAllOptions()
324
+ ) -> torch.Tensor:
325
+ coll = ProcessLocalGroup._start_coll(AllToAllBase(), self)
326
+ res = coll.join(self._rank, (output_buffer, input_buffer, output_split_sizes, input_split_sizes))
327
+ ProcessLocalGroup._end_coll(coll, self)
328
+ return res
329
+
330
+ def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
331
+ coll = ProcessLocalGroup._start_coll(AllToAll(), self)
332
+ res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
333
+ ProcessLocalGroup._end_coll(coll, self)
334
+ return res
335
+
336
+ def allreduce(self, tensor_list, opts=AllreduceOptions()):
337
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
338
+ res = coll.join(self._rank, tensor_list)
339
+ ProcessLocalGroup._end_coll(coll, self)
340
+ return res
341
+
342
+ def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
343
+ coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
344
+ res = coll.join(self._rank, tensor_list)
345
+ ProcessLocalGroup._end_coll(coll, self)
346
+ return res
347
+
348
+ def barrier(self, opts=BarrierOptions()):
349
+ return self.allreduce(tensor_list=[torch.ones(1)])
350
+
351
+ def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
352
+ coll = ProcessLocalGroup._start_coll(AllGather(), self)
353
+ res = coll.join(self._rank, (output_tensors, input_tensor))
354
+ ProcessLocalGroup._end_coll(coll, self)
355
+ return res
356
+
357
+ def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
358
+ tensor_list = list(torch.chunk(output_tensor, self._world_size))
359
+ return self.allgather([tensor_list], [input_tensor], opts)
360
+
361
+ def broadcast(self, tensor_list, opts=BroadcastOptions()):
362
+ coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self)
363
+ res = coll.join(self._rank, tensor_list)
364
+ ProcessLocalGroup._end_coll(coll, self)
365
+ return res
366
+
367
+ def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()):
368
+ coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self)
369
+ res = coll.join(self._rank, (output_tensors, input_tensors))
370
+ ProcessLocalGroup._end_coll(coll, self)
371
+ return res
372
+
373
+ def gather(self, output_tensors, input_tensors, opts=ScatterOptions()):
374
+ coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self)
375
+ res = coll.join(self._rank, (output_tensors, input_tensors))
376
+ ProcessLocalGroup._end_coll(coll, self)
377
+ return res
378
+
379
+ def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
380
+ coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self)
381
+ res = coll.join(self._rank, (output_tensor, scatter_list))
382
+ ProcessLocalGroup._end_coll(coll, self)
383
+ return res
384
+
385
+ def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()):
386
+ tensor_list = list(torch.chunk(input_tensor, self._world_size))
387
+ return self.reduce_scatter([output_tensor], [tensor_list], opts)
388
+
389
+ def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()):
390
+ works = [
391
+ self._reduce_scatter_base(output_tensor, input_tensor, opts)
392
+ for output_tensor, input_tensor
393
+ in zip(output_tensors, input_tensors)
394
+ ]
395
+ for work in works[:-1]:
396
+ work.wait()
397
+ return works[-1]
398
+
399
+ def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()):
400
+ res = None
401
+ for o_t, i_t in zip(output_tensor_list, input_tensor_list):
402
+ res = self._allgather_base(o_t, i_t)
403
+ return res
404
+
405
+ def __init__(self, rank, world_size):
406
+ super().__init__(rank, world_size)
407
+ self._rank = rank
408
+ self._world_size = world_size
409
+ world = dist.distributed_c10d._world
410
+ if isinstance(world, ThreadLocalWorld):
411
+ world = world._get_world()
412
+ self._world = weakref.ref(world)
413
+ self._ctx = torch.autograd.set_multithreading_enabled(False)
414
+
415
+ def size(self):
416
+ return self._world_size
417
+
418
+ @property
419
+ def pg_name(self):
420
+ """
421
+ Return the globally registered name of the current pg in the world.
422
+ """
423
+ return self._world().pg_names[self]
424
+
425
+ @property
426
+ def group_name(self):
427
+ return self.pg_name
428
+
429
+ def getBackendName(self):
430
+ return "threaded"
431
+
432
+ def __repr__(self):
433
+ return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}"
434
+
435
+
436
+ def _create_threaded_pg(prefix_store, rank, world_size, timeout):
437
+ pg = ProcessLocalGroup(rank, world_size)
438
+ # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional
439
+ # When a device mesh involves sub-groups and the store-based barrier is not enabled in c10d,
440
+ # even though the threaded pg's actual collectives are assumed to be single-threaded,
441
+ # different threads may be initializing different groups,
442
+ # leading to race conditions.
443
+ # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups
444
+ # (dim 0 and 1) would be initialized in different threads independently.
445
+ # In this case we can no longer rely on class or global variables
446
+ # but have to rely on store based barrier to make sure each group
447
+ # is ready separately before we can invoke collectives in any of the groups.
448
+
449
+ # the prefix store is already per group so we pass an empty name here
450
+ _store_based_barrier(rank, prefix_store, "", world_size, timeout)
451
+ return pg
452
+
453
+
454
+ dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"])
455
+
456
+
457
+ @dataclass
458
+ class WorldData:
459
+ default_pg: dist.ProcessGroup
460
+ pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]]
461
+ pg_names: Dict[dist.ProcessGroup, str]
462
+ pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]]
463
+ pg_backend_config: Dict[dist.ProcessGroup, str]
464
+ group_count: int
465
+ tags_to_pg: Dict[str, List[dist.ProcessGroup]]
466
+ pg_to_tag: Dict[dist.ProcessGroup, str]
467
+ pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]
468
+ pg_default_device: Dict[dist.ProcessGroup, torch.device]
469
+
470
+
471
+ class ThreadLocalWorld:
472
+ _world = threading.local()
473
+
474
+ def _get_world(self) -> WorldData:
475
+ if not hasattr(ThreadLocalWorld._world, "world"):
476
+ ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {})
477
+ return ThreadLocalWorld._world.world
478
+
479
+ @property
480
+ def default_pg(self):
481
+ return self._get_world().default_pg
482
+
483
+ @default_pg.setter
484
+ def default_pg(self, value):
485
+ self._get_world().default_pg = value
486
+
487
+ @property
488
+ def pg_map(self):
489
+ return self._get_world().pg_map
490
+
491
+ @property
492
+ def pg_names(self):
493
+ return self._get_world().pg_names
494
+
495
+ @property
496
+ def pg_group_ranks(self):
497
+ return self._get_world().pg_group_ranks
498
+
499
+ @property
500
+ def pg_backend_config(self):
501
+ return self._get_world().pg_backend_config
502
+
503
+ @property
504
+ def group_count(self) -> int:
505
+ return self._get_world().group_count
506
+
507
+ @group_count.setter
508
+ def group_count(self, value):
509
+ self._get_world().group_count = value
510
+
511
+ @property
512
+ def tags_to_pg(self):
513
+ return self._get_world().tags_to_pg
514
+
515
+ @property
516
+ def pg_to_tag(self):
517
+ return self._get_world().pg_to_tag
518
+
519
+ @property
520
+ def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]:
521
+ return self._get_world().pg_coalesce_state
522
+
523
+ @property
524
+ def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]:
525
+ return self._get_world().pg_default_device
526
+
527
+
528
+ _old_pg_world = None
529
+ _ctx_manager = None
530
+
531
+
532
+ def _install_threaded_pg():
533
+ global _old_pg_world
534
+ global _ctx_manager
535
+ _old_pg_world = dist.distributed_c10d._world
536
+ dist.distributed_c10d._world = ThreadLocalWorld()
537
+ _ctx_manager = torch.autograd.set_multithreading_enabled(False)
538
+
539
+ return dist.distributed_c10d._world
540
+
541
+
542
+ def _uninstall_threaded_pg():
543
+ dist.distributed_c10d._world = _old_pg_world
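The "threaded" backend above runs each rank as a thread of one process, with ThreadLocalWorld holding per-thread c10d state and ProcessLocalGroup rendezvousing the threads inside each collective. PyTorch's MultiThreadedTestCase normally drives this machinery; the sketch below is a hand-rolled approximation, and the shared HashStore and thread layout are assumptions rather than a documented API:

    import threading

    import torch
    import torch.distributed as dist
    from torch.testing._internal.distributed.multi_threaded_pg import (
        _install_threaded_pg,
        _uninstall_threaded_pg,
    )

    WORLD_SIZE = 4
    _install_threaded_pg()        # swap the c10d world for the thread-local one
    store = dist.HashStore()      # one store shared by all "ranks"

    def run_rank(rank):
        dist.init_process_group(
            backend="threaded", rank=rank, world_size=WORLD_SIZE, store=store
        )
        t = torch.ones(2) * rank
        dist.all_reduce(t)        # threads meet inside ProcessLocalGroup
        assert t.tolist() == [6.0, 6.0]   # 0 + 1 + 2 + 3
        dist.destroy_process_group()

    threads = [threading.Thread(target=run_rank, args=(r,)) for r in range(WORLD_SIZE)]
    for th in threads:
        th.start()
    for th in threads:
        th.join()
    _uninstall_threaded_pg()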
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc ADDED
Binary file (1.75 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py ADDED
@@ -0,0 +1,281 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+
4
+ import threading
5
+
6
+ import torch
7
+ import torch.distributed.autograd as dist_autograd
8
+ import torch.distributed.rpc as rpc
9
+ from torch import optim
10
+ from torch.distributed.optim import DistributedOptimizer
11
+ from torch.testing._internal.dist_utils import dist_init
12
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
13
+ RpcAgentTestFixture,
14
+ )
15
+
16
+
17
+ class MyModule:
18
+ lock = threading.Lock()
19
+
20
+ def __init__(self, requires_grad=True):
21
+ # cannot directly use torch.manual_seed(0) as all threads share the same
22
+ # default generator. The race from multiple RPC threads could mess up
23
+ # the draw order from the default RNG instance, leading to
24
+ # non-deterministic behavior. Hence, create a dedicated RNG here.
25
+ g_cpu = torch.Generator()
26
+ g_cpu.manual_seed(0)
27
+ self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu)
28
+
29
+ def forward(self, t1):
30
+ return torch.mm(self.w, t1)
31
+
32
+ def get_w(self):
33
+ return self.w
34
+
35
+
36
+ class FailingOptimizer(optim.Optimizer):
37
+ def __init__(self, params):
38
+ super().__init__(params, {})
39
+
40
+ def step(self, closure=None):
41
+ raise ValueError("Error running optimizer.")
42
+
43
+
44
+ class OptimizerFailingOnConstructor(optim.Optimizer):
45
+ def __init__(self, params):
46
+ super().__init__(params, {})
47
+ raise ValueError("Error creating optimizer.")
48
+
49
+ def step(self, closure=None):
50
+ raise NotImplementedError
51
+
52
+
53
+ def _call_method(method, obj_rref, *args, **kwargs):
54
+ return method(obj_rref.local_value(), *args, **kwargs)
55
+
56
+
57
+ def remote_method(method, obj_rref, *args, **kwargs):
58
+ """
59
+ Call rpc.remote on a method in a remote object.
60
+
61
+ Args:
62
+ method: the method (for example, Class.method)
63
+ obj_rref (RRef): remote reference to the object
64
+ args: positional arguments to pass to the method
65
+ kwargs: keyword arguments to pass to the method
66
+
67
+ Returns a RRef to the remote method call result.
68
+ """
69
+ return rpc.remote(
70
+ obj_rref.owner(),
71
+ _call_method,
72
+ args=[method, obj_rref] + list(args),
73
+ kwargs=kwargs,
74
+ )
75
+
76
+
77
+ def rpc_async_method(method, obj_rref, *args, **kwargs):
78
+ """
79
+ Call rpc.rpc_async on a method in a remote object.
80
+
81
+ Args:
82
+ method: the method (for example, Class.method)
83
+ obj_rref (RRef): remote reference to the object
84
+ args: positional arguments to pass to the method
85
+ kwargs: keyword arguments to pass to the method
86
+
87
+ Returns a Future to the method call result.
88
+ """
89
+ return rpc.rpc_async(
90
+ obj_rref.owner(),
91
+ _call_method,
92
+ args=[method, obj_rref] + list(args),
93
+ kwargs=kwargs,
94
+ )
95
+
96
+
97
+ class DistOptimizerTest(RpcAgentTestFixture):
98
+ @dist_init()
99
+ def test_dist_optim_exception(self):
100
+ # distributed version
101
+ owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
102
+ owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
103
+
104
+ remote_module1 = rpc.remote(owner1, MyModule)
105
+ remote_module2 = rpc.remote(owner2, MyModule)
106
+ remote_param1 = remote_method(MyModule.get_w, remote_module1)
107
+ remote_param2 = remote_method(MyModule.get_w, remote_module2)
108
+
109
+ dist_optim = DistributedOptimizer(
110
+ FailingOptimizer, [remote_param1, remote_param2]
111
+ )
112
+
113
+ with dist_autograd.context() as context_id:
114
+ g_cpu = torch.Generator()
115
+ g_cpu.manual_seed(0)
116
+ t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
117
+ t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
118
+ output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
119
+ output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
120
+ loss = torch.add(output2.wait(), t1).sum()
121
+
122
+ dist_autograd.backward(context_id, [loss])
123
+ with self.assertRaisesRegex(Exception, "Error running optimizer"):
124
+ dist_optim.step(context_id)
125
+
126
+ @dist_init()
127
+ def test_dist_optim_exception_on_constructor(self):
128
+ # distributed version
129
+ owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
130
+ owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
131
+
132
+ remote_module1 = rpc.remote(owner1, MyModule)
133
+ remote_module2 = rpc.remote(owner2, MyModule)
134
+ remote_param1 = remote_method(MyModule.get_w, remote_module1)
135
+ remote_param2 = remote_method(MyModule.get_w, remote_module2)
136
+
137
+ with self.assertRaisesRegex(Exception, "Error creating optimizer."):
138
+ dist_optim = DistributedOptimizer(
139
+ OptimizerFailingOnConstructor, [remote_param1, remote_param2]
140
+ )
141
+
142
+ def _test_dist_optim_base(self, optim_cls, *args, **kwargs):
143
+ # local version
144
+ module1 = MyModule()
145
+ module2 = MyModule()
146
+ params = [module1.get_w(), module2.get_w()]
147
+ local_optim = optim_cls(params, *args, **kwargs)
148
+
149
+ old_w1 = module1.w.clone().detach()
150
+ old_w2 = module2.w.clone().detach()
151
+
152
+ g_cpu = torch.Generator()
153
+ g_cpu.manual_seed(0)
154
+ t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
155
+ t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
156
+ output1 = module1.forward(t2)
157
+ output2 = module2.forward(output1)
158
+ loss = torch.add(output2, t1).sum()
159
+
160
+ loss.backward()
161
+ local_optim.step()
162
+
163
+ # distributed version
164
+ owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
165
+ owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
166
+
167
+ remote_module1 = rpc.remote(owner1, MyModule)
168
+ remote_module2 = rpc.remote(owner2, MyModule)
169
+ remote_param1 = remote_method(MyModule.get_w, remote_module1)
170
+ remote_param2 = remote_method(MyModule.get_w, remote_module2)
171
+
172
+ old_w1_remote = remote_param1.to_here()
173
+
174
+ # sanity check: local and remote initial weights should match
175
+ self.assertEqual(old_w1, remote_param1.to_here())
176
+ self.assertEqual(old_w2, remote_param2.to_here())
177
+
178
+ dist_optim = DistributedOptimizer(
179
+ optim_cls, [remote_param1, remote_param2], *args, **kwargs
180
+ )
181
+
182
+ with dist_autograd.context() as context_id:
183
+ g_cpu.manual_seed(0)
184
+ t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
185
+ t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
186
+ output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
187
+ output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
188
+ loss = torch.add(output2.wait(), t1)
189
+
190
+ dist_autograd.backward(context_id, [loss.sum()])
191
+ dist_optim.step(context_id)
192
+
193
+ new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait()
194
+ new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait()
195
+
196
+ # ensure optimizer changed weights
197
+ self.assertNotEqual(old_w1, new_w1)
198
+ self.assertNotEqual(old_w2, new_w2)
199
+ # ensure local equals remote
200
+ self.assertEqual(new_w1, module1.get_w())
201
+ self.assertEqual(new_w2, module2.get_w())
202
+
203
+ @dist_init()
204
+ def test_dist_optim(self):
205
+ self._test_dist_optim_base(optim.Adagrad, lr=0.05)
206
+ self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True)
207
+ self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True)
208
+ self._test_dist_optim_base(optim.SGD, lr=0.05)
209
+ self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True)
210
+ self._test_dist_optim_base(optim.Adadelta, rho=0.95)
211
+ self._test_dist_optim_base(optim.RMSprop, lr=0.05)
212
+ self._test_dist_optim_base(optim.Adamax, lr=0.05)
213
+ self._test_dist_optim_base(optim.Rprop, lr=0.05)
214
+
215
+ def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs):
216
+ # local version
217
+ module1 = MyModule()
218
+ module2 = MyModule(requires_grad=False)
219
+ params = [module1.get_w(), module2.get_w()]
220
+ local_optim = optim_cls(params, *args, **kwargs)
221
+
222
+ old_w1 = module1.w.clone().detach()
223
+ old_w2 = module2.w.clone().detach()
224
+
225
+ g_cpu = torch.Generator()
226
+ g_cpu.manual_seed(0)
227
+ t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
228
+ t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
229
+ output1 = module1.forward(t2)
230
+ output2 = module2.forward(output1)
231
+ loss = torch.add(output2, t1).sum()
232
+
233
+ loss.backward()
234
+ local_optim.step()
235
+
236
+ # distributed version
237
+ owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
238
+ owner2 = "worker%d" % ((self.rank + 2) % self.world_size)
239
+
240
+ remote_module1 = rpc.remote(owner1, MyModule)
241
+ remote_module2 = rpc.remote(owner2, MyModule, args=(False,))
242
+ remote_param1 = remote_module1.remote().get_w()
243
+ remote_param2 = remote_module2.remote().get_w()
244
+
245
+ # sanity check: local and remote initial weights should match
246
+ self.assertEqual(old_w1, remote_param1.to_here())
247
+ self.assertEqual(old_w2, remote_param2.to_here())
248
+
249
+ dist_optim = DistributedOptimizer(
250
+ optim_cls, [remote_param1, remote_param2], *args, **kwargs
251
+ )
252
+
253
+ with dist_autograd.context() as context_id:
254
+ g_cpu.manual_seed(0)
255
+ t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
256
+ t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
257
+ output1 = remote_module1.rpc_async().forward(t2)
258
+ output2 = remote_module2.rpc_async().forward(output1.wait())
259
+ loss = torch.add(output2.wait(), t1)
260
+
261
+ dist_autograd.backward(context_id, [loss.sum()])
262
+ dist_optim.step(context_id)
263
+
264
+ new_w1 = remote_module1.rpc_async().get_w().wait()
265
+ new_w2 = remote_module2.rpc_async().get_w().wait()
266
+
267
+ # ensure optimizer changed weights for w1
268
+ self.assertNotEqual(old_w1, new_w1)
269
+
270
+ # ensure optimizer not changed weights for w2
271
+ self.assertEqual(old_w2, new_w2)
272
+ # ensure local equals remote
273
+ self.assertEqual(new_w1, module1.get_w())
274
+ self.assertEqual(new_w2, module2.get_w())
275
+
276
+ @dist_init()
277
+ def test_dist_optim_none_grads(self):
278
+ self._test_dist_optim_none_grads(optim.SGD, lr=0.05)
279
+ self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05)
280
+ self._test_dist_optim_none_grads(optim.Rprop, lr=0.05)
281
+ self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95)
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (206 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc ADDED
Binary file (4.97 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc ADDED
Binary file (8.98 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py ADDED
@@ -0,0 +1,144 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ # If you need to modify this file to make this test pass, please also apply the same edits to
4
+ # https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py
5
+ # and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server
6
+
7
+ import threading
8
+ from datetime import datetime
9
+ from time import perf_counter
10
+
11
+ import torch
12
+ import torch.distributed.rpc as rpc
13
+ import torch.nn as nn
14
+ from torch import optim
15
+
16
+ from torch.testing._internal.dist_utils import (
17
+ dist_init,
18
+ worker_name,
19
+ )
20
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
21
+
22
+ batch_size = 20
23
+ in_features = 100
24
+ out_features = 30
25
+ num_batches = 4
26
+
27
+
28
+ def timed_log(text):
29
+ print(f"{datetime.now().strftime('%H:%M:%S')} {text}")
30
+
31
+
32
+ class BatchUpdateParameterServer:
33
+
34
+ def __init__(self, batch_update_size):
35
+ self.model = nn.Linear(in_features, out_features)
36
+ self.lock = threading.Lock()
37
+ self.future_model = torch.futures.Future()
38
+ self.batch_update_size = batch_update_size
39
+ self.curr_update_size = 0
40
+ self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
41
+ for p in self.model.parameters():
42
+ p.grad = torch.zeros_like(p)
43
+
44
+ def get_model(self):
45
+ return self.model
46
+
47
+ @staticmethod
48
+ @rpc.functions.async_execution
49
+ def update_and_fetch_model(ps_rref, grads):
50
+ self = ps_rref.local_value()
51
+ for p, g in zip(self.model.parameters(), grads):
52
+ if p.grad is None:
53
+ p.grad = g
54
+ else:
55
+ p.grad += g
56
+ with self.lock:
57
+ timed_log(f"PS got {self.curr_update_size}/{self.batch_update_size} updates")
58
+ self.curr_update_size += 1
59
+ fut = self.future_model
60
+
61
+ if self.curr_update_size >= self.batch_update_size:
62
+ for p in self.model.parameters():
63
+ p.grad /= self.batch_update_size
64
+ self.curr_update_size = 0
65
+ self.optimizer.step()
66
+ self.optimizer.zero_grad()
67
+ fut.set_result(self.model)
68
+ timed_log("PS updated model")
69
+ self.future_model = torch.futures.Future()
70
+
71
+ return fut
72
+
73
+
74
+ class Trainer:
75
+
76
+ def __init__(self, ps_rref):
77
+ self.ps_rref = ps_rref
78
+ self.loss_fn = nn.L1Loss()
79
+
80
+ def get_next_batch(self):
81
+ for _ in range(num_batches):
82
+ inputs = torch.randn(batch_size, in_features)
83
+ labels = torch.zeros(batch_size, out_features)
84
+ yield inputs, labels
85
+
86
+ def train(self):
87
+ name = rpc.get_worker_info().name
88
+ m = self.ps_rref.rpc_sync().get_model()
89
+ for inputs, labels in self.get_next_batch():
90
+ timed_log(f"{name} processing one batch")
91
+ self.loss_fn(m(inputs), labels).backward()
92
+ timed_log(f"{name} reporting grads")
93
+ m = rpc.rpc_sync(
94
+ self.ps_rref.owner(),
95
+ BatchUpdateParameterServer.update_and_fetch_model,
96
+ args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]),
97
+ )
98
+ timed_log(f"{name} got updated model")
99
+
100
+
101
+ def run_trainer(ps_rref):
102
+ trainer = Trainer(ps_rref)
103
+ trainer.train()
104
+
105
+
106
+ def run_ps(trainers):
107
+ timed_log("Start training")
108
+ start = perf_counter()
109
+ ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers)))
110
+ futs = []
111
+ for trainer in trainers:
112
+ futs.append(
113
+ rpc.rpc_async(trainer, run_trainer, args=(ps_rref,))
114
+ )
115
+
116
+ torch.futures.wait_all(futs)
117
+ stop = perf_counter()
118
+ timed_log("Finish training")
119
+ timed_log(f"Time spent training: {stop-start}s")
120
+
121
+ class ParameterServerTest(RpcAgentTestFixture):
122
+
123
+ @dist_init(setup_rpc=False)
124
+ def test_batch_updating_parameter_server(self):
125
+
126
+ if self.rank != 0:
127
+ rpc.init_rpc(
128
+ name=worker_name(self.rank),
129
+ backend=self.rpc_backend,
130
+ rank=self.rank,
131
+ world_size=self.world_size,
132
+ rpc_backend_options=self.rpc_backend_options,
133
+ )
134
+ else:
135
+ rpc.init_rpc(
136
+ name=worker_name(self.rank),
137
+ backend=self.rpc_backend,
138
+ rank=self.rank,
139
+ world_size=self.world_size,
140
+ rpc_backend_options=self.rpc_backend_options,
141
+ )
142
+ run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)])
143
+
144
+ rpc.shutdown()
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py ADDED
@@ -0,0 +1,261 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ # If you need to modify this file to make this test pass, please also apply the same edits to
4
+ # https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py
5
+ # and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html
6
+
7
+ import numpy as np
8
+ from itertools import count
9
+
10
+ import torch
11
+ import torch.distributed.rpc as rpc
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ import torch.optim as optim
15
+ from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
16
+ from torch.distributions import Categorical
17
+
18
+ from torch.testing._internal.dist_utils import dist_init, worker_name
19
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
20
+
21
+ TOTAL_EPISODE_STEP = 5000
22
+ GAMMA = 0.1
23
+ SEED = 543
24
+
25
+ def _call_method(method, rref, *args, **kwargs):
26
+ r"""
27
+ a helper function to call a method on the given RRef
28
+ """
29
+ return method(rref.local_value(), *args, **kwargs)
30
+
31
+
32
+ def _remote_method(method, rref, *args, **kwargs):
33
+ r"""
34
+ a helper function to run method on the owner of rref and fetch back the
35
+ result using RPC
36
+ """
37
+ args = [method, rref] + list(args)
38
+ return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)
39
+
40
+
41
+ class Policy(nn.Module):
42
+ r"""
43
+ Borrowing the ``Policy`` class from the Reinforcement Learning example.
44
+ Copying the code to make these two examples independent.
45
+ See https://github.com/pytorch/examples/tree/master/reinforcement_learning
46
+ """
47
+ def __init__(self) -> None:
48
+ super().__init__()
49
+ self.affine1 = nn.Linear(4, 128)
50
+ self.dropout = nn.Dropout(p=0.6)
51
+ self.affine2 = nn.Linear(128, 2)
52
+
53
+ self.saved_log_probs = []
54
+ self.rewards = []
55
+
56
+ def forward(self, x):
57
+ x = self.affine1(x)
58
+ x = self.dropout(x)
59
+ x = F.relu(x)
60
+ action_scores = self.affine2(x)
61
+ return F.softmax(action_scores, dim=1)
62
+
63
+
64
+ class DummyEnv:
65
+ r"""
66
+ A dummy environment that implements the required subset of the OpenAI gym
67
+ interface. It exists only to avoid a dependency on gym for running the
68
+ tests in this file. It is designed to run for a set max number of iterations,
69
+ returning random states and rewards at each step.
70
+ """
71
+ def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
72
+ self.state_dim = state_dim
73
+ self.num_iters = num_iters
74
+ self.iter = 0
75
+ self.reward_threshold = reward_threshold
76
+
77
+ def seed(self, manual_seed):
78
+ torch.manual_seed(manual_seed)
79
+
80
+ def reset(self):
81
+ self.iter = 0
82
+ return torch.randn(self.state_dim)
83
+
84
+ def step(self, action):
85
+ self.iter += 1
86
+ state = torch.randn(self.state_dim)
87
+ reward = torch.rand(1).item() * self.reward_threshold
88
+ done = self.iter >= self.num_iters
89
+ info = {}
90
+ return state, reward, done, info
91
+
92
+
93
+ class Observer:
94
+ r"""
95
+ An observer has exclusive access to its own environment. Each observer
96
+ captures the state from its environment, and send the state to the agent to
97
+ select an action. Then, the observer applies the action to its environment
98
+ and reports the reward to the agent.
99
+ """
100
+ def __init__(self) -> None:
101
+ self.id = rpc.get_worker_info().id
102
+ self.env = DummyEnv()
103
+ self.env.seed(SEED)
104
+
105
+ def run_episode(self, agent_rref, n_steps):
106
+ r"""
107
+ Run one episode of n_steps.
108
+ Arguments:
109
+ agent_rref (RRef): an RRef referencing the agent object.
110
+ n_steps (int): number of steps in this episode
111
+ """
112
+ state, ep_reward = self.env.reset(), 0
113
+ for step in range(n_steps):
114
+ # send the state to the agent to get an action
115
+ action = _remote_method(Agent.select_action, agent_rref, self.id, state)
116
+
117
+ # apply the action to the environment, and get the reward
118
+ state, reward, done, _ = self.env.step(action)
119
+
120
+ # report the reward to the agent for training purpose
121
+ _remote_method(Agent.report_reward, agent_rref, self.id, reward)
122
+
123
+ if done:
124
+ break
125
+
126
+
127
+ class Agent:
128
+ def __init__(self, world_size):
129
+ self.ob_rrefs = []
130
+ self.agent_rref = RRef(self)
131
+ self.rewards = {}
132
+ self.saved_log_probs = {}
133
+ self.policy = Policy()
134
+ self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
135
+ self.eps = np.finfo(np.float32).eps.item()
136
+ self.running_reward = 0
137
+ self.reward_threshold = DummyEnv().reward_threshold
138
+ for ob_rank in range(1, world_size):
139
+ ob_info = rpc.get_worker_info(worker_name(ob_rank))
140
+ self.ob_rrefs.append(remote(ob_info, Observer))
141
+ self.rewards[ob_info.id] = []
142
+ self.saved_log_probs[ob_info.id] = []
143
+
144
+ def select_action(self, ob_id, state):
145
+ r"""
146
+ This function is mostly borrowed from the Reinforcement Learning example.
147
+ See https://github.com/pytorch/examples/tree/master/reinforcement_learning
148
+ The main difference is that instead of keeping all probs in one list,
149
+ the agent keeps probs in a dictionary, one key per observer.
150
+
151
+ NB: no need to enforce thread-safety here as GIL will serialize
152
+ executions.
153
+ """
154
+ probs = self.policy(state.unsqueeze(0))
155
+ m = Categorical(probs)
156
+ action = m.sample()
157
+ self.saved_log_probs[ob_id].append(m.log_prob(action))
158
+ return action.item()
159
+
160
+ def report_reward(self, ob_id, reward):
161
+ r"""
162
+ Observers call this function to report rewards.
163
+ """
164
+ self.rewards[ob_id].append(reward)
165
+
166
+ def run_episode(self, n_steps=0):
167
+ r"""
168
+ Run one episode. The agent will tell each observer to run n_steps.
169
+ """
170
+ futs = []
171
+ for ob_rref in self.ob_rrefs:
172
+ # make async RPC to kick off an episode on all observers
173
+ futs.append(
174
+ rpc_async(
175
+ ob_rref.owner(),
176
+ _call_method,
177
+ args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
178
+ )
179
+ )
180
+
181
+ # wait until all observers have finished this episode
182
+ for fut in futs:
183
+ fut.wait()
184
+
185
+ def finish_episode(self):
186
+ r"""
187
+ This function is mostly borrowed from the Reinforcement Learning example.
188
+ See https://github.com/pytorch/examples/tree/master/reinforcement_learning
189
+ The main difference is that it joins all probs and rewards from
190
+ different observers into one list, and uses the minimum observer rewards
191
+ as the reward of the current episode.
192
+ """
193
+
194
+ # joins probs and rewards from different observers into lists
195
+ R, probs, rewards = 0, [], []
196
+ for ob_id in self.rewards:
197
+ probs.extend(self.saved_log_probs[ob_id])
198
+ rewards.extend(self.rewards[ob_id])
199
+
200
+ # use the minimum observer reward to calculate the running reward
201
+ min_reward = min(sum(self.rewards[ob_id]) for ob_id in self.rewards)
202
+ self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward
203
+
204
+ # clear saved probs and rewards
205
+ for ob_id in self.rewards:
206
+ self.rewards[ob_id] = []
207
+ self.saved_log_probs[ob_id] = []
208
+
209
+ policy_loss, returns = [], []
210
+ for r in rewards[::-1]:
211
+ R = r + GAMMA * R
212
+ returns.insert(0, R)
213
+ returns = torch.tensor(returns)
214
+ returns = (returns - returns.mean()) / (returns.std() + self.eps)
215
+ for log_prob, R in zip(probs, returns):
216
+ policy_loss.append(-log_prob * R)
217
+ self.optimizer.zero_grad()
218
+ policy_loss = torch.cat(policy_loss).sum()
219
+ policy_loss.backward()
220
+ self.optimizer.step()
221
+ return min_reward
222
+
223
+
224
+ def run_agent(agent, n_steps):
225
+ for i_episode in count(1):
226
+ agent.run_episode(n_steps=n_steps)
227
+ last_reward = agent.finish_episode()
228
+
229
+ if agent.running_reward > agent.reward_threshold:
230
+ print(f"Solved! Running reward is now {agent.running_reward}!")
231
+ break
232
+
233
+
234
+ class ReinforcementLearningRpcTest(RpcAgentTestFixture):
235
+ @dist_init(setup_rpc=False)
236
+ def test_rl_rpc(self):
237
+ if self.rank == 0:
238
+ # Rank 0 is the agent.
239
+ rpc.init_rpc(
240
+ name=worker_name(self.rank),
241
+ backend=self.rpc_backend,
242
+ rank=self.rank,
243
+ world_size=self.world_size,
244
+ rpc_backend_options=self.rpc_backend_options,
245
+ )
246
+ agent = Agent(self.world_size)
247
+ run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))
248
+
249
+ # Ensure training was run. We don't really care about whether the task was learned,
250
+ # since the purpose of the test is to check the API calls.
251
+ self.assertGreater(agent.running_reward, 0.0)
252
+ else:
253
+ # Other ranks are observers that passively wait for instructions from the agent.
254
+ rpc.init_rpc(
255
+ name=worker_name(self.rank),
256
+ backend=self.rpc_backend,
257
+ rank=self.rank,
258
+ world_size=self.world_size,
259
+ rpc_backend_options=self.rpc_backend_options,
260
+ )
261
+ rpc.shutdown()
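
`Agent.finish_episode` above turns the pooled rewards into discounted returns (walking the rewards backwards with R = r + GAMMA * R) and normalizes them before building the REINFORCE loss. A small standalone sketch of just that return computation, with made-up example rewards and GAMMA taken from the constant above (the original uses numpy's float32 eps; torch.finfo stands in for it here):

import torch

GAMMA = 0.1
rewards = [1.0, 0.0, 2.0]  # made-up example rewards

# Accumulate discounted returns from the end of the episode backwards.
R, returns = 0.0, []
for r in reversed(rewards):
    R = r + GAMMA * R
    returns.insert(0, R)

returns = torch.tensor(returns)            # tensor([1.0200, 0.2000, 2.0000])
eps = torch.finfo(torch.float32).eps
normalized = (returns - returns.mean()) / (returns.std() + eps)
print(returns, normalized)
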
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py ADDED
@@ -0,0 +1,326 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch
4
+ import time
5
+ import torch.distributed.rpc as rpc
6
+ from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs
7
+ from torch.testing._internal.dist_utils import (
8
+ dist_init,
9
+ wait_until_pending_futures_and_users_flushed,
10
+ wait_until_owners_and_forks_on_rank,
11
+ worker_name,
12
+ )
13
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
14
+ RpcAgentTestFixture,
15
+ )
16
+
17
+ def my_sleep_func(seconds=1):
18
+ time.sleep(seconds)
19
+ return torch.mul(torch.tensor(1), torch.tensor(1))
20
+
21
+ @torch.jit.script
22
+ def my_script_func(tensor):
23
+ return torch.add(tensor, tensor)
24
+
25
+ def add_rref_to_value(rref, value):
26
+ return rref.to_here() + value
27
+
28
+ class FaultyAgentRpcTest(RpcAgentTestFixture):
29
+
30
+ # no faulty_messages defined so this fails all retryable messages - see
31
+ # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
32
+ @dist_init(messages_to_delay={})
33
+ def test_check_failed_messages(self):
34
+ if self.rank == 0:
35
+ dst_worker_b = worker_name((self.rank + 1) % self.world_size)
36
+ dst_worker_c = worker_name((self.rank + 2) % self.world_size)
37
+
38
+ # Worker0 sends RPC to Worker1 and creates an RRef there
39
+ rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
40
+ # Worker0 sends an RPC to Worker2 with the RRef as an arg
41
+ rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
42
+ # check if the output is as expected
43
+ self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
44
+ # explicitly delete all User RRefs
45
+ _delete_all_user_and_unforked_owner_rrefs()
46
+
47
+ @dist_init
48
+ def test_verify_backend_options(self):
49
+ self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
50
+ self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
51
+ self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
52
+ self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
53
+ self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
54
+ self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
55
+
56
+ @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
57
+ def test_custom_faulty_messages(self):
58
+ self.assertEqual(
59
+ {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"},
60
+ set(self.rpc_backend_options.messages_to_fail),
61
+ )
62
+
63
+ @dist_init(faulty_messages=[])
64
+ def test_no_faulty_messages(self):
65
+ self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
66
+
67
+ @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
68
+ def test_custom_messages_to_delay(self):
69
+ self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
70
+
71
+ def _test_remote_message_dropped_pickle(self, dst=None):
72
+ if self.rank != 0:
73
+ return
74
+ dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
75
+ dst_worker = f"worker{dst_rank}"
76
+ # Since we fail python_remote_call messages synchronously, the future
77
+ # corresponding to this remote call will be marked with an error when
78
+ # this function returns.
79
+ rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
80
+ # Call to ensure pending callbacks are run.
81
+ wait_until_pending_futures_and_users_flushed()
82
+ # Attempting to fork the RRef should raise an error indicating the rpc.remote timeout.
83
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
84
+ rref._serialize()
85
+ # Test that using RRef as arg over RPC (which forks) results in the same
86
+ # error
87
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
88
+ rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
89
+
90
+ @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
91
+ def test_remote_message_dropped_pickle(self):
92
+ self._test_remote_message_dropped_pickle()
93
+
94
+ @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
95
+ def test_remote_message_dropped_pickle_to_self(self):
96
+ self._test_remote_message_dropped_pickle(self.rank)
97
+
98
+
99
+ def _test_remote_message_dropped_timeout(self, func, args, dst=None):
100
+ if self.rank != 0:
101
+ return
102
+
103
+ # test the case where rpc.remote() message creation is completely dropped.
104
+ dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
105
+ dst_worker = f"worker{dst_rank}"
106
+ # Since we fail python_remote_call messages synchronously, the future
107
+ # corresponding to this remote call will be marked with an error when
108
+ # this function returns.
109
+ rref = rpc.remote(dst_worker, func, args=args)
110
+ # Call to ensure pending callbacks are run.
111
+ wait_until_pending_futures_and_users_flushed()
112
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
113
+ rref.to_here()
114
+ # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
115
+ # on the owning nodes; this is expected because the OwnerRRef was never
116
+ # successfully created. Therefore, delAllUsers will work as expected.
117
+
118
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
119
+ def test_builtin_remote_message_dropped_timeout(self):
120
+ func = torch.add
121
+ args = (torch.tensor(1), torch.tensor(1))
122
+ self._test_remote_message_dropped_timeout(func, args)
123
+
124
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
125
+ def test_builtin_remote_message_dropped_timeout_to_self(self):
126
+ func = torch.add
127
+ args = (torch.tensor(1), torch.tensor(1))
128
+ self._test_remote_message_dropped_timeout(func, args, dst=0)
129
+
130
+ @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
131
+ def test_udf_remote_message_dropped_timeout(self):
132
+ func = my_sleep_func
133
+ args = (2,)
134
+ self._test_remote_message_dropped_timeout(func, args)
135
+
136
+ @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
137
+ def test_udf_remote_message_dropped_timeout_to_self(self):
138
+ func = my_sleep_func
139
+ args = (2,)
140
+ self._test_remote_message_dropped_timeout(func, args, dst=0)
141
+
142
+ def _test_remote_message_delay_timeout(self, func, args, dst=None):
143
+ if self.rank != 0:
144
+ return
145
+ # Test the case where remote message is eventually processed on the owner,
146
+ # but the future on the creator times out before the response comes back.
147
+ dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
148
+ dst_worker = f"worker{dst_rank}"
149
+ # 1 ms timeout (the timeout argument is in seconds)
150
+ rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
151
+ # Future corresponding to the remote creation should time out.
152
+ expected_error = self.get_timeout_error_regex()
153
+ with self.assertRaisesRegex(RuntimeError, expected_error):
154
+ rref._get_future().wait()
155
+
156
+ # Call to ensure pending callbacks are run.
157
+ wait_until_pending_futures_and_users_flushed()
158
+ # to_here() should now pick up that rpc.remote() creation has failed.
159
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
160
+ rref.to_here()
161
+
162
+ # Test the case where rpc.remote() times out, but to_here() has already
163
+ # started blocking before.
164
+ # NOTE: we only test this when not sending to self, as to_here() calls
165
+ # localValue(), which does not send an RPC and thus does not have
166
+ # a timeout. This can be supported by allowing future.wait() to
167
+ # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
168
+ if dst_rank != self.rank:
169
+ slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
170
+
171
+ with self.assertRaisesRegex(RuntimeError, expected_error):
172
+ # to_here() should raise timeout error, since it does not know about the
173
+ # status of rpc.remote().
174
+ slow_rref.to_here(0.001)
175
+ # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
176
+ # but this can be a noop since it may not exist on the owner yet. Later,
177
+ # the owner can process the RRef creation and wait for the delete message,
178
+ # thus leading to a timeout.
179
+ # Therefore, we wait until we get notification that pending owners have
180
+ # been confirmed before sending out RRefUserDeletes.
181
+ if dst_rank != self.rank:
182
+ wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
183
+
184
+ @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
185
+ def test_udf_remote_message_delay_timeout(self):
186
+ func = my_sleep_func
187
+ args = (2,)
188
+ self._test_remote_message_delay_timeout(func, args)
189
+
190
+ @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
191
+ def test_udf_remote_message_delay_timeout_to_self(self):
192
+ func = my_sleep_func
193
+ args = (1,)
194
+ self._test_remote_message_delay_timeout(func, args, dst=0)
195
+
196
+ @dist_init(
197
+ faulty_messages=[],
198
+ messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
199
+ )
200
+ def test_remote_message_builtin_delay_timeout(self):
201
+ func = torch.add
202
+ args = (torch.tensor(1), torch.tensor(1))
203
+ self._test_remote_message_delay_timeout(func, args)
204
+
205
+ @dist_init(
206
+ faulty_messages=[],
207
+ messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
208
+ )
209
+ def test_remote_message_builtin_delay_timeout_to_self(self):
210
+ func = torch.add
211
+ args = (torch.tensor(1), torch.tensor(1))
212
+ self._test_remote_message_delay_timeout(func, args, dst=0)
213
+
214
+ @dist_init(
215
+ faulty_messages=[],
216
+ messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
217
+ )
218
+ def test_remote_message_script_delay_timeout(self):
219
+ func = my_script_func
220
+ args = (torch.tensor(1),)
221
+ self._test_remote_message_delay_timeout(func, args)
222
+
223
+ @dist_init(
224
+ faulty_messages=[],
225
+ messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
226
+ )
227
+ def test_remote_message_script_delay_timeout_to_self(self):
228
+ func = my_script_func
229
+ args = (torch.tensor(1),)
230
+ self._test_remote_message_delay_timeout(func, args, dst=0)
231
+
232
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
233
+ def test_rref_to_here_timeout(self):
234
+ if self.rank != 0:
235
+ return
236
+
237
+ dst_rank = (self.rank + 1) % self.world_size
238
+ dst_worker = f"worker{dst_rank}"
239
+ rref = rpc.remote(
240
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
241
+ )
242
+ expected_error = self.get_timeout_error_regex()
243
+ with self.assertRaisesRegex(RuntimeError, expected_error):
244
+ rref.to_here(0.01)
245
+
246
+ rref.to_here()
247
+
248
+ @dist_init(faulty_messages=[])
249
+ def test_rpc_builtin_timeout(self):
250
+ next_rank = (self.rank + 1) % self.world_size
251
+ dst_worker = worker_name(next_rank)
252
+ expected_error = self.get_timeout_error_regex()
253
+ # PYTHON_CALL message types which correspond to Python UDF over RPC
254
+ # by default get a delay (see faulty_rpc_agent_test_fixture)
255
+ with self.assertRaisesRegex(RuntimeError, expected_error):
256
+ rpc.rpc_sync(
257
+ dst_worker,
258
+ torch.add,
259
+ args=(torch.tensor(1), torch.tensor(1)),
260
+ timeout=1,
261
+ )
262
+
263
+ fut = rpc.rpc_async(
264
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
265
+ )
266
+ with self.assertRaisesRegex(RuntimeError, expected_error):
267
+ fut.wait()
268
+
269
+ # Ensure that the currently set default timeout is large enough such
270
+ # that RPCs with delays still complete.
271
+ fut = rpc.rpc_async(
272
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
273
+ )
274
+ fut.wait()
275
+
276
+ # Ensure timeout if we set a new default and don't override
277
+ rpc._set_rpc_timeout(0.001)
278
+ fut = rpc.rpc_async(
279
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
280
+ )
281
+ with self.assertRaisesRegex(RuntimeError, expected_error):
282
+ fut.wait()
283
+
284
+ # Ensure run to completion if we specify timeout of 0
285
+ fut = rpc.rpc_async(
286
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
287
+ )
288
+ fut.wait()
289
+ # Reset for clean shutdown
290
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
291
+
292
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
293
+ def test_rpc_script_timeout(self):
294
+ next_rank = (self.rank + 1) % self.world_size
295
+ dst_worker = worker_name(next_rank)
296
+ expected_error = self.get_timeout_error_regex()
297
+ with self.assertRaisesRegex(RuntimeError, expected_error):
298
+ rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
299
+
300
+ fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
301
+ with self.assertRaisesRegex(RuntimeError, expected_error):
302
+ fut.wait()
303
+
304
+ # Ensure that the currently set default timeout is large enough such
305
+ # that RPCs with delays still complete.
306
+ fut = rpc.rpc_async(
307
+ dst_worker, my_script_func, args=(torch.tensor(1),)
308
+ )
309
+ fut.wait()
310
+
311
+ # Ensure timeout if we set a new default and don't override
312
+ rpc._set_rpc_timeout(0.001)
313
+ fut = rpc.rpc_async(
314
+ dst_worker, my_script_func, args=(torch.tensor(1),)
315
+ )
316
+ with self.assertRaisesRegex(RuntimeError, expected_error):
317
+ fut.wait()
318
+
319
+ # Ensure run to completion if we specify timeout of 0
320
+ rpc._set_rpc_timeout(0.001)
321
+ fut = rpc.rpc_async(
322
+ dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
323
+ )
324
+ fut.wait()
325
+ # Reset for clean shutdown
326
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
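
The delay tests above all follow the same shape: the message is held back longer than the caller is willing to wait, so the future errors out with the timeout regex, while a later call with a generous (or default) timeout still succeeds. Below is a local, RPC-free analogue of that delay-versus-timeout pattern using only the standard library; the function and class names are made up for illustration.

import concurrent.futures
import time
import unittest


def slow_add(a, b, delay=1.5):
    # Stands in for an RPC whose message is delayed by 1.5 seconds.
    time.sleep(delay)
    return a + b


class DelayTimeoutDemo(unittest.TestCase):
    def test_wait_shorter_than_delay_times_out(self):
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            fut = pool.submit(slow_add, 1, 1)
            # Waiting less than the delay raises, like rpc_sync(..., timeout=1)
            # against a 1.5 s SCRIPT_CALL delay above.
            with self.assertRaises(concurrent.futures.TimeoutError):
                fut.result(timeout=0.1)
            # Waiting long enough still yields the result, like the follow-up
            # rpc_async calls above that rely on the default timeout.
            self.assertEqual(fut.result(timeout=5), 2)


if __name__ == "__main__":
    unittest.main()
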
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,62 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed.rpc as rpc
4
+ import torch.distributed.rpc._testing # noqa: F401
5
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
6
+ RpcAgentTestFixture,
7
+ )
8
+
9
+ # The following message types are currently retried in the RREF protocol and
10
+ # distributed autograd. Thus only these messages should be tested with the
11
+ # Faulty RPC Agent.
12
+ retryable_message_types = ["RREF_FORK_REQUEST",
13
+ "RREF_CHILD_ACCEPT",
14
+ "RREF_USER_DELETE",
15
+ "CLEANUP_AUTOGRAD_CONTEXT_REQ"]
16
+
17
+ # The following messages incur the corresponding delay in seconds while being
18
+ # processed in FaultyTensorPipeAgent's enqueueSend() function.
19
+ default_messages_to_delay = {
20
+ "PYTHON_CALL": 1.5, # Python UDF
21
+ "SCRIPT_CALL": 1.5, # Script/Builtin
22
+ }
23
+
24
+ class FaultyRpcAgentTestFixture(RpcAgentTestFixture):
25
+ def __init__(self, *args, **kwargs):
26
+ super().__init__(*args, **kwargs)
27
+ self.messages_to_fail = retryable_message_types
28
+ self.messages_to_delay = default_messages_to_delay
29
+
30
+ @property
31
+ def rpc_backend(self):
32
+ return rpc.backend_registry.BackendType[
33
+ "FAULTY_TENSORPIPE"
34
+ ]
35
+
36
+ @property
37
+ def rpc_backend_options(self):
38
+ return rpc.backend_registry.construct_rpc_backend_options(
39
+ self.rpc_backend,
40
+ init_method=self.init_method,
41
+ num_worker_threads=8,
42
+ num_fail_sends=3,
43
+ messages_to_fail=self.messages_to_fail,
44
+ messages_to_delay=self.messages_to_delay,
45
+ )
46
+
47
+ def setup_fault_injection(self, faulty_messages, messages_to_delay):
48
+ if faulty_messages is not None:
49
+ self.messages_to_fail = faulty_messages
50
+ if messages_to_delay is not None:
51
+ self.messages_to_delay = messages_to_delay
52
+
53
+ def get_shutdown_error_regex(self):
54
+ error_regexes = [
55
+ "Exception in thread pool task",
56
+ "Connection reset by peer",
57
+ "Connection closed by peer"
58
+ ]
59
+ return "|".join([f"({error_str})" for error_str in error_regexes])
60
+
61
+ def get_timeout_error_regex(self):
62
+ return "RPC ran for more than"
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (201 Bytes). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc ADDED
Binary file (6.02 kB). View file
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py ADDED
@@ -0,0 +1,116 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ from typing import Dict, Tuple
4
+
5
+ import torch
6
+ import torch.distributed.autograd as dist_autograd
7
+ import torch.distributed.rpc as rpc
8
+ from torch import Tensor
9
+ from torch.distributed.rpc import rpc_async
10
+ from torch.testing import FileCheck
11
+ from torch.testing._internal.dist_utils import dist_init, worker_name
12
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
13
+ RpcAgentTestFixture,
14
+ )
15
+
16
+
17
+ @torch.jit.script
18
+ def local_add(t1, t2):
19
+ return torch.add(t1, t2)
20
+
21
+
22
+ @torch.jit.script
23
+ def remote_add(t1, t2, dst: str): # noqa: E999
24
+ return rpc_async(dst, local_add, (t1, t2)).wait()
25
+
26
+
27
+ @torch.jit.script
28
+ def fork_add(t1, t2, dst: str):
29
+ fut = torch.jit._fork(remote_add, t1, t2, dst)
30
+ return torch.jit._wait(fut)
31
+
32
+
33
+ class JitDistAutogradTest(RpcAgentTestFixture):
34
+ @dist_init
35
+ def test_get_gradients(self):
36
+ dst_rank = self.rank
37
+
38
+ @torch.jit.script
39
+ def dist_get_gradients(context_id: int) -> (Dict[Tensor, Tensor]):
40
+ return dist_autograd.get_gradients(context_id)
41
+
42
+ FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
43
+ with dist_autograd.context() as context_id:
44
+ t1 = torch.rand((3, 3), requires_grad=True)
45
+ t2 = torch.rand((3, 3), requires_grad=True)
46
+ t3 = torch.add(t1, t2)
47
+
48
+ dist_autograd.backward(context_id, [t3.sum()])
49
+ grads = dist_get_gradients(context_id)
50
+
51
+ self.assertEqual(2, len(grads))
52
+ self.assertIn(t1, grads)
53
+ self.assertIn(t2, grads)
54
+ self.assertEqual(torch.ones(3, 3), grads[t1])
55
+ self.assertEqual(torch.ones(3, 3), grads[t2])
56
+
57
+ @dist_init
58
+ def test_dist_backward(self):
59
+ if self.rank != 0:
60
+ return
61
+
62
+ @torch.jit.script
63
+ def dist_backward_script(context_id: int, loss: torch.Tensor):
64
+ dist_autograd.backward(context_id, [loss])
65
+
66
+ FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
67
+ with dist_autograd.context() as context_id:
68
+ t1 = torch.rand(3, 3, requires_grad=True)
69
+ t2 = torch.rand(3, 3, requires_grad=True)
70
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
71
+ loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
72
+ dist_backward_script(context_id, loss)
73
+
74
+ @dist_init
75
+ def test_jit_fork_within_context(self):
76
+ with dist_autograd.context() as context_id:
77
+ t1 = torch.rand((3, 3), requires_grad=True)
78
+ t2 = torch.rand((3, 3), requires_grad=True)
79
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
80
+ res = fork_add(t1, t2, dst_worker_name)
81
+ loss = res.sum()
82
+ dist_autograd.backward(context_id, [loss])
83
+
84
+ grads = dist_autograd.get_gradients(context_id)
85
+ self.assertEqual(2, len(grads))
86
+ self.assertIn(t1, grads)
87
+ self.assertIn(t2, grads)
88
+
89
+ @dist_init
90
+ def test_restore_context_after_swtich_to_jit_thread(self):
91
+ if self.rank != 0:
92
+ return
93
+
94
+ @torch.jit.script
95
+ def forward_script(
96
+ context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
97
+ ) -> Tuple[Tensor, Tensor]:
98
+ res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
99
+ res1 = res1_fut.wait() # After this, the script runs in a new JIT thread.
100
+ loss1 = res1.sum()
101
+
102
+ # SendRpcBackward is not attached, since DistAutogradContext is lost here.
103
+ res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
104
+ res2 = res2_fut.wait()
105
+ loss2 = res2.sum()
106
+
107
+ return loss1, loss2
108
+
109
+ with dist_autograd.context() as context_id:
110
+ t1 = torch.ones((2, 3), requires_grad=True)
111
+ t2 = torch.ones((2, 3), requires_grad=True)
112
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
113
+ loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
114
+ dist_autograd.backward(context_id, [loss0, loss1])
115
+ grad0, grad1 = dist_autograd.get_gradients(context_id)
116
+ self.assertEqual(grad0, grad1)
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py ADDED
@@ -0,0 +1,1385 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import time
4
+ import io
5
+ from typing import Dict, List, Tuple, Any
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ import torch.distributed.rpc as rpc
10
+ from torch import Tensor
11
+ from torch.autograd.profiler import record_function
12
+ from torch.distributed.rpc import RRef
13
+ from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key
14
+ from torch.futures import Future
15
+ from torch.testing._internal.common_utils import TemporaryFileName
16
+ from torch.testing._internal.dist_utils import (
17
+ dist_init,
18
+ get_function_event,
19
+ initialize_pg,
20
+ worker_name,
21
+ )
22
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
23
+ RpcAgentTestFixture,
24
+ )
25
+
26
+ from torch.autograd.profiler_legacy import profile as _profile
27
+
28
+ def rref_isinstance(rref, cls_to_check):
29
+ return isinstance(rref.local_value(), cls_to_check)
30
+
31
+ def sleep(t):
32
+ time.sleep(t)
33
+
34
+
35
+ def rpc_return_rref(dst):
36
+ return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
37
+
38
+
39
+ @torch.jit.script
40
+ def rref_local_value(rref: RRef[Tensor]) -> Tensor:
41
+ return rref.local_value()
42
+
43
+
44
+ @torch.jit.script
45
+ def list_create() -> List[int]:
46
+ global_list = [1, 2, 3]
47
+ return global_list
48
+
49
+
50
+ @torch.jit.script
51
+ def rref_list_mutate(rref: RRef[List[int]]) -> None:
52
+ rref.local_value().append(4)
53
+ rref.to_here().append(5)
54
+ rref.to_here(5.0).append(6)
55
+
56
+
57
+ def return_value(value: int) -> int:
58
+ return value
59
+
60
+
61
+ class RRefAPITest:
62
+ @dist_init
63
+ def test_rref_is_owner(self):
64
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
65
+ rref_var = rpc_return_rref(dst_worker_name)
66
+
67
+ @torch.jit.script
68
+ def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
69
+ return rref_var.is_owner()
70
+
71
+ res = rref_tensor_is_owner(rref_var)
72
+ self.assertEqual(res, False)
73
+
74
+ @dist_init
75
+ def test_rref_local_value(self):
76
+ if self.rank != 0:
77
+ return
78
+
79
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
80
+ rref = rpc_return_rref(dst_worker_name)
81
+
82
+ with self.assertRaisesRegex(
83
+ RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
84
+ ):
85
+ rref_local_value(rref)
86
+
87
+ ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
88
+ self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
89
+
90
+ @dist_init
91
+ def test_local_rref_local_value(self):
92
+ if self.rank != 0:
93
+ return
94
+
95
+ dst_worker_name = worker_name(self.rank)
96
+ rref = rpc.remote(dst_worker_name, return_value, (5,), {})
97
+
98
+ ret = rref_local_value(rref)
99
+ self.assertEqual(ret, 5)
100
+
101
+ def _create_rref(self):
102
+ owner_rank = (self.rank + 2) % self.world_size
103
+ return rpc.remote(
104
+ worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1)
105
+ )
106
+
107
+ @dist_init
108
+ def test_user_rrefs_confirmed(self):
109
+ dst_rank = (self.rank + 1) % self.world_size
110
+ rref = self._create_rref()
111
+ ret = rpc.rpc_sync(
112
+ worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
113
+ )
114
+ self.assertEqual(ret, True)
115
+
116
+ @dist_init
117
+ def test_user_rrefs_confirmed_remote(self):
118
+ dst_rank = (self.rank + 1) % self.world_size
119
+ rref = self._create_rref()
120
+ ret_rref = rpc.remote(
121
+ worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
122
+ )
123
+ self.assertEqual(ret_rref.to_here(), True)
124
+
125
+ @dist_init
126
+ def test_rref_list_mutate(self):
127
+ dst = worker_name((self.rank + 1) % self.world_size)
128
+ list_rref = rpc.remote(dst, list_create)
129
+
130
+ rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,))
131
+ self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6])
132
+
133
+
134
+ @torch.jit.script
135
+ def no_arg():
136
+ return 0
137
+
138
+
139
+ @torch.jit.script
140
+ def one_arg(value):
141
+ return value + 1
142
+
143
+ @torch.jit.script
144
+ def script_add_ones(x):
145
+ return torch.add(x, torch.ones(1))
146
+
147
+ @torch.jit.script
148
+ def script_add_ones_with_record_function(x, block: str):
149
+ with record_function(block):
150
+ return torch.add(x, torch.ones(1))
151
+
152
+
153
+ @torch.jit.script
154
+ def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor:
155
+ t: Tensor = torch.ones(1)
156
+ with record_function(block) as rf:
157
+ fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
158
+ # Extra operator call to avoid de-duplication of the next async call
159
+ # see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279
160
+ zero = torch.zeros_like(t)
161
+ fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
162
+ res = fut1.wait() + fut2.wait() + zero
163
+ return res
164
+
165
+
166
+
167
+ @torch.jit.script
168
+ def script_fork_wait_udf(tensor):
169
+ fut = torch.jit._fork(script_add_ones, tensor)
170
+ x = torch.jit._wait(fut)
171
+ return x
172
+
173
+
174
+ @torch.jit.script
175
+ def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
176
+ return rref_var.to_here()
177
+
178
+
179
+ @torch.jit.script
180
+ def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]:
181
+ return rref_var
182
+
183
+
184
+ @torch.jit.script
185
+ def script_raise_func(value):
186
+ if value.numel() == 2:
187
+ raise ValueError("Expected error")
188
+ return value + 1
189
+
190
+
191
+ @torch.jit.script
192
+ def script_fork_wait_throw(invalue):
193
+ fut = torch.jit._fork(script_raise_func, invalue)
194
+ value = torch.jit._wait(fut)
195
+ return value
196
+
197
+
198
+ @torch.jit.script
199
+ def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor:
200
+ # Call rpc_async from within ScriptFunction and ensure that we can attach
201
+ # profiling callbacks. Note that `record` here is a handle to the profiler
202
+ # RecordFunction.
203
+ fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),))
204
+ torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
205
+ ret = fut.wait()
206
+ return ret
207
+
208
+ @torch.jit.script
209
+ def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
210
+ fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
211
+ return fut.wait()
212
+
213
+
214
+ @torch.jit.script
215
+ def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor:
216
+ # Call fork from within ScriptFunction and ensure that we can attach profiling
217
+ # callbacks to the resulting future. Note that `record` here is a handle
218
+ # to the profiler RecordFunction.
219
+ fut = torch.jit._fork(one_arg, torch.tensor(1))
220
+ torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
221
+ ret = fut.wait()
222
+ return ret
223
+
224
+
225
+ class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
226
+ def __init__(self, dst_worker):
227
+ super().__init__()
228
+ self.rrefs = []
229
+ for _ in range(4):
230
+ self.rrefs.append(rpc_return_rref(dst_worker))
231
+
232
+ @torch.jit.script_method
233
+ def forward(self) -> Tensor:
234
+ res_tensor = torch.ones(2, 2)
235
+ for rref in self.rrefs:
236
+ res_tensor += rref.to_here()
237
+
238
+ return res_tensor
239
+
240
+
241
+ @torch.jit.ignore
242
+ def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]:
243
+ return rref_var
244
+
245
+
246
+ @torch.jit.script
247
+ def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor:
248
+ return rref_python_annotation(rref_var).to_here()
249
+
250
+
251
+ class RRefTypingTest:
252
+ @dist_init
253
+ def test_rref_as_arg_and_return(self):
254
+ n = self.rank + 1
255
+ dst_rank = n % self.world_size
256
+ local_ret = one_arg(torch.ones(2, 2))
257
+
258
+ # create rref on current rank
259
+ rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),))
260
+
261
+ # pass rref to another user in rpc call
262
+ ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,))
263
+ self.assertEqual(ret, local_ret)
264
+
265
+ # return rref in rpc call
266
+ rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,))
267
+ self.assertEqual(rref1.to_here(), local_ret)
268
+
269
+ # pass rref to another user in remote call
270
+ rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,))
271
+ self.assertEqual(rref2.to_here(), local_ret)
272
+
273
+ # return rref in remote call
274
+ rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,))
275
+ self.assertEqual(rref3.to_here().to_here(), local_ret)
276
+
277
+ @dist_init
278
+ def test_my_script_module_with_rrefs(self):
279
+ n = self.rank + 1
280
+ dst_rank = n % self.world_size
281
+
282
+ module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank))
283
+ res = module_with_rrefs()
284
+ self.assertEqual(res, torch.ones(2, 2) * 9)
285
+
286
+ @dist_init
287
+ def test_rref_python_annotation(self):
288
+ n = self.rank + 1
289
+ dst_rank = n % self.world_size
290
+ rref_var = rpc_return_rref(worker_name(dst_rank))
291
+
292
+ res = rref_script_annotation(rref_var)
293
+ self.assertEqual(res, torch.ones(2, 2) + 1)
294
+
295
+
296
+ class FutureTypingTest:
297
+ @dist_init
298
+ def test_future_passed_between_python_and_jit(self):
299
+ dst_rank = (self.rank + 1) % self.world_size
300
+ inputs = (torch.tensor([1, 1]), torch.tensor([2, 2]))
301
+ ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs)
302
+ expected_res = torch.tensor([10, 10])
303
+
304
+ @torch.jit.script
305
+ def future_wait_in_script(fut: Future[Tensor]) -> Tensor:
306
+ return fut.wait()
307
+
308
+ self.assertEqual(future_wait_in_script(ret_fut), expected_res)
309
+
310
+ @torch.jit.script
311
+ def future_return_to_python(
312
+ dst_rank: int, inputs: Tuple[Tensor, Tensor]
313
+ ) -> Future[Tensor]:
314
+ return rpc.rpc_async(
315
+ f"worker{dst_rank}", two_args_two_kwargs, inputs
316
+ )
317
+
318
+ fut_res = future_return_to_python(dst_rank, inputs)
319
+ self.assertEqual(fut_res.wait(), expected_res)
320
+
321
+ @dist_init
322
+ def test_future_python_annotation(self):
323
+ if self.rank != 0:
324
+ return
325
+
326
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
327
+ input_0 = torch.ones(2, 2)
328
+ input_1 = 1
329
+ expected_res = torch.add(input_0, input_1)
330
+
331
+ @torch.jit.ignore
332
+ def python_return_future() -> Future[Tensor]:
333
+ fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {})
334
+ return fut
335
+
336
+ @torch.jit.script
337
+ def script_use_future() -> Tensor:
338
+ fut = python_return_future()
339
+ return fut.wait()
340
+
341
+ res = script_use_future()
342
+ self.assertEqual(res, expected_res)
343
+
344
+
345
+ @torch.jit.script
346
+ class MyScriptClass:
347
+ def __init__(self, a: int):
348
+ self.a = a
349
+
350
+ def get_value(self) -> int:
351
+ return self.a
352
+
353
+
354
+ @torch.jit.interface
355
+ class MyModuleInterface(torch.nn.Module):
356
+ def forward(self) -> Tensor:
357
+ # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
358
+ pass
359
+
360
+
361
+ class MyScriptModule(torch.jit.ScriptModule):
362
+ def __init__(self, rank):
363
+ super().__init__()
364
+ self.a = torch.ones(rank)
365
+
366
+ @torch.jit.script_method
367
+ def forward(self) -> Tensor:
368
+ return self.a
369
+
370
+ @torch.jit.script_method
371
+ def custom_func(self) -> Tensor:
372
+ return self.a
373
+
374
+
375
+ def owner_create_rref_my_script_class(a):
376
+ return rpc.RRef(MyScriptClass(a))
377
+
378
+
379
+ def owner_create_rref_my_script_module(a):
380
+ return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface)
381
+
382
+
383
+ @torch.jit.script
384
+ def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int:
385
+ return rref.to_here().get_value()
386
+
387
+
388
+ @torch.jit.script
389
+ def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor:
390
+ return rref.to_here().forward()
391
+
392
+
393
+ class LocalRRefTest:
394
+ @dist_init
395
+ def test_create_local_script_class_rref_in_py(self):
396
+ if self.rank != 0:
397
+ return
398
+
399
+ # Create a local RRef<MyScriptClass>.
400
+ rref_script_class = rpc.RRef(MyScriptClass(self.rank))
401
+ ret = rref_script_class.to_here().get_value()
402
+ self.assertEqual(ret, self.rank)
403
+
404
+ @dist_init
405
+ def test_create_local_script_module_rref_in_py(self):
406
+ if self.rank != 0:
407
+ return
408
+
409
+ # Create a local RRef<MyModuleInterface>.
410
+ rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface)
411
+ ret = rref_script_module.to_here().forward()
412
+ self.assertEqual(ret, torch.ones(self.rank))
413
+
414
+ # Create a local RRef<MyModuleInterface> without type hint.
415
+ with self.assertRaisesRegex(
416
+ RuntimeError,
417
+ (
418
+ "The RRef being created contains a ScriptModule, "
419
+ "must provide its ModuleInterface type hint."
420
+ ),
421
+ ):
422
+ rref_script_module = rpc.RRef(MyScriptModule(self.rank))
423
+
424
+ @dist_init
425
+ def test_return_local_script_class_rref_in_py_and_use_in_script(self):
426
+ if self.rank != 0:
427
+ return
428
+
429
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
430
+
431
+ # Create a local RRef<MyScriptClass> remotely in Python.
432
+ rref = rpc.rpc_sync(
433
+ dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,)
434
+ )
435
+
436
+ def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int:
437
+ args = (rref,)
438
+ kwargs: Dict[str, Any] = {}
439
+ fut = rpc.rpc_async(
440
+ rref.owner(), script_rref_get_value_my_script_class, args, kwargs
441
+ )
442
+ ret = fut.wait()
443
+ return ret
444
+
445
+ # Use RRef<MyScriptClass> in local Python RPC and remote Script run.
446
+ ret = use_rref_on_owner(rref)
447
+ self.assertEqual(ret, self.rank)
448
+
449
+ # Use RRef<MyScriptClass> in local Script RPC and remote Script run.
450
+ use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
451
+ ret = use_rref_on_owner_script(rref)
452
+ self.assertEqual(ret, self.rank)
453
+
454
+ @dist_init
455
+ def test_return_local_script_module_rref_in_py_and_use_in_script(self):
456
+ if self.rank != 0:
457
+ return
458
+
459
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
460
+
461
+ # Create a local RRef<MyModuleInterface> remotely in Python.
462
+ rref = rpc.rpc_sync(
463
+ dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,)
464
+ )
465
+
466
+ def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor:
467
+ args = (rref,)
468
+ kwargs: Dict[str, Any] = {}
469
+ fut = rpc.rpc_async(
470
+ rref.owner_name(),
471
+ script_rref_run_forward_my_script_module,
472
+ args,
473
+ kwargs,
474
+ )
475
+ ret = fut.wait()
476
+ return ret
477
+
478
+ # Use RRef<MyScriptClass> in local Python RPC and remote Script run.
479
+ ret = use_rref_on_owner(rref)
480
+ self.assertEqual(ret, torch.ones(self.rank))
481
+
482
+ # Use RRef<MyScriptClass> in local Script RPC and remote Script run.
483
+ use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
484
+ ret = use_rref_on_owner_script(rref)
485
+ self.assertEqual(ret, torch.ones(self.rank))
486
+
487
+
488
+ def python_function():
489
+ return 0
490
+
491
+
492
+ @torch.jit.script
493
+ def two_args_two_kwargs(
494
+ first_arg,
495
+ second_arg,
496
+ first_kwarg=torch.tensor([3, 3]),
497
+ second_kwarg=torch.tensor([4, 4]),
498
+ ):
499
+ return first_arg + second_arg + first_kwarg + second_kwarg
500
+
501
+
502
+ @torch.jit.script
503
+ def assorted_types_args_kwargs(
504
+ tensor_arg: Tensor, # noqa: E999
505
+ str_arg: str,
506
+ int_arg: int,
507
+ tensor_kwarg: Tensor = torch.tensor([2, 2]),
508
+ str_kwarg: str = "str_kwarg",
509
+ int_kwarg: int = 2,
510
+ ):
511
+ return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg
512
+
513
+
514
+ @torch.jit.script
515
+ def raise_script():
516
+ raise RuntimeError("Expected error")
517
+
518
+
519
+ @torch.jit.script
520
+ def script_rpc_async_call(
521
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
522
+ ):
523
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
524
+ ret = fut.wait()
525
+ return ret
526
+
527
+ @torch.jit.script
528
+ def script_rpc_sync_call(
529
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
530
+ ):
531
+ res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs)
532
+ return res
533
+
534
+ @torch.jit.script
535
+ def script_rpc_remote_call(
536
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
537
+ ):
538
+ rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs)
539
+ return rref_res.to_here()
540
+
541
+ class JitRpcOpTest:
542
+ # Call functions remotely from Script.
543
+ @dist_init
544
+ def test_all_kwargs_are_populated_by_defaults(self):
545
+ if self.rank != 0:
546
+ return
547
+
548
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
549
+
550
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
551
+ kwargs = {}
552
+
553
+ for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
554
+ ret = script_op(
555
+ dst_worker_name, args, kwargs
556
+ )
557
+ self.assertEqual(ret, torch.tensor([10, 10]))
558
+
559
+ @dist_init
560
+ def test_some_kwargs_are_populated_by_defaults(self):
561
+ if self.rank != 0:
562
+ return
563
+
564
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
565
+
566
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
567
+ kwargs = {"first_kwarg": torch.tensor([2, 2])}
568
+
569
+ for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
570
+ ret = script_op(
571
+ dst_worker_name, args, kwargs
572
+ )
573
+ self.assertEqual(ret, torch.tensor([9, 9]))
574
+
575
+ @dist_init
576
+ def test_no_kwargs_are_populated_by_defaults(self):
577
+ if self.rank != 0:
578
+ return
579
+
580
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
581
+
582
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
583
+ kwargs = {
584
+ "first_kwarg": torch.tensor([2, 2]),
585
+ "second_kwarg": torch.tensor([3, 3]),
586
+ }
587
+ for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
588
+ ret = script_op(
589
+ dst_worker_name, args, kwargs
590
+ )
591
+ self.assertEqual(ret, torch.tensor([8, 8]))
592
+
593
+ @dist_init
594
+ def test_args_and_kwargs_contain_different_types(self):
595
+ if self.rank != 0:
596
+ return
597
+
598
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
599
+
600
+ @torch.jit.script
601
+ def script_rpc_async_call_with_assorted_types(
602
+ dst_worker_name: str,
603
+ ):
604
+ args = (torch.tensor([1, 1]), "str_arg", 1)
605
+ # Must annotate the value type as `Any`, because JIT type inference
606
+ # does not support multiple types when defining a Dict.
607
+ # The error JIT gives is,
608
+ # "Dict values must contain only a single type, "
609
+ # "expected: Tensor but found str instead."
610
+ kwargs: Dict[str, Any] = {
611
+ "tensor_kwarg": torch.tensor([3, 3]),
612
+ "str_kwarg": "_str_kwarg",
613
+ "int_kwarg": 3,
614
+ }
615
+ fut = rpc.rpc_async(
616
+ dst_worker_name, assorted_types_args_kwargs, args, kwargs
617
+ )
618
+ ret = fut.wait()
619
+ return ret
620
+
621
+ ret = script_rpc_async_call_with_assorted_types(
622
+ dst_worker_name
623
+ )
624
+ self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4))
625
+
626
+ @dist_init
627
+ def test_kwargs_not_passed(self):
628
+ if self.rank != 0:
629
+ return
630
+
631
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
632
+
633
+ @torch.jit.script
634
+ def script_rpc_async_call_without_kwargs_passed(
635
+ dst_worker_name: str,
636
+ ):
637
+ args = ()
638
+ fut = rpc.rpc_async(dst_worker_name, no_arg, args)
639
+ ret = fut.wait()
640
+ return ret
641
+
642
+ ret = script_rpc_async_call_without_kwargs_passed(
643
+ dst_worker_name
644
+ )
645
+ self.assertEqual(ret, 0)
646
+
647
+ @dist_init
648
+ def test_args_kwargs_are_neither_passed(self):
649
+ if self.rank != 0:
650
+ return
651
+
652
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
653
+
654
+ @torch.jit.script
655
+ def script_rpc_async_call_without_args_kwargs_passed(
656
+ dst_worker_name: str,
657
+ ):
658
+ fut = rpc.rpc_async(dst_worker_name, no_arg)
659
+ ret = fut.wait()
660
+ return ret
661
+
662
+ ret = script_rpc_async_call_without_args_kwargs_passed(
663
+ dst_worker_name
664
+ )
665
+ self.assertEqual(ret, 0)
666
+
667
+ @dist_init
668
+ def test_less_than_needed_args_are_specified(self):
669
+ if self.rank != 0:
670
+ return
671
+
672
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
673
+
674
+ # Notice, args matching happens during scripting.
675
+ with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"):
676
+
677
+ @torch.jit.script
678
+ def script_rpc_async_call_with_less_args(
679
+ dst_worker_name: str, # noqa: E999
680
+ ):
681
+ args = (torch.tensor([1, 1]),)
682
+ kwargs = {}
683
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
684
+ ret = fut.wait()
685
+ return ret
686
+
687
+ @dist_init
688
+ def test_more_than_needed_args_are_specified(self):
689
+ if self.rank != 0:
690
+ return
691
+
692
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
693
+
694
+ # Notice, args matching happens during scripting.
695
+ with self.assertRaisesRegex(
696
+ RuntimeError,
697
+ "Expected at most 4 arguments but found 5 positional arguments",
698
+ ):
699
+
700
+ @torch.jit.script
701
+ def script_rpc_async_call_with_more_args(
702
+ dst_worker_name: str,
703
+ ):
704
+ args = (
705
+ torch.tensor([1, 1]),
706
+ torch.tensor([2, 2]),
707
+ torch.tensor([3, 3]),
708
+ torch.tensor([4, 4]),
709
+ torch.tensor([5, 5]),
710
+ )
711
+ kwargs = {}
712
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
713
+ ret = fut.wait()
714
+ return ret
715
+
716
+ @dist_init
717
+ def test_unexepected_kwarg_is_specified(self):
718
+ if self.rank != 0:
719
+ return
720
+
721
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
722
+
723
+ # Notice, kwargs matching happens during execution.
724
+ @torch.jit.script
725
+ def script_rpc_async_call_with_unexpected_kwarg(
726
+ dst_worker_name: str, # noqa: E999
727
+ ):
728
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
729
+ kwargs = {"third_kwarg": torch.tensor([1, 1])}
730
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
731
+ ret = fut.wait()
732
+ return ret
733
+
734
+ with self.assertRaisesRegex(
735
+ RuntimeError, "Unknown keyword argument 'third_kwarg'"
736
+ ):
737
+ ret = script_rpc_async_call_with_unexpected_kwarg(
738
+ dst_worker_name
739
+ )
740
+ self.assertEqual(ret, 0)
741
+
742
+ @dist_init
743
+ def test_call_python_function_remotely_from_script_not_supported(self):
744
+ if self.rank != 0:
745
+ return
746
+
747
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
748
+
749
+ @torch.jit.script
750
+ def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str):
751
+ args = ()
752
+ kwargs = {}
753
+ fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs)
754
+ ret = fut.wait()
755
+ return ret
756
+
757
+ with self.assertRaisesRegex(
758
+ RuntimeError, "attempted to get undefined function"
759
+ ):
760
+ ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name)
761
+ self.assertEqual(ret, 0)
762
+
763
+ @dist_init
764
+ def test_call_script_function_that_raises_remotely_from_script(self):
765
+ if self.rank != 0:
766
+ return
767
+
768
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
769
+
770
+ # Note: TorchScript always translates (emits) a Python `raise` statement
771
+ # as the exception message string "Exception",
772
+ # no matter what exception type and message appear in the statement.
773
+ @torch.jit.script
774
+ def rpc_async_call_remote_raising_torchscript_in_torchscript(
775
+ dst_worker_name: str,
776
+ ):
777
+ args = ()
778
+ kwargs = {}
779
+ fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs)
780
+ ret = fut.wait()
781
+ return ret
782
+
783
+ with self.assertRaisesRegex(RuntimeError, "Expected error"):
784
+ ret = rpc_async_call_remote_raising_torchscript_in_torchscript(
785
+ dst_worker_name
786
+ )
787
+ self.assertEqual(ret, 0)
788
+
789
+ @dist_init
790
+ def test_call_script_function_that_not_exists_remotely_from_script(self):
791
+ if self.rank != 0:
792
+ return
793
+
794
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
795
+
796
+ @torch.jit.script
797
+ def nonexisting_script():
798
+ return 0
799
+
800
+ @torch.jit.script
801
+ def rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
802
+ dst_worker_name: str,
803
+ ):
804
+ args = ()
805
+ kwargs = {}
806
+ fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs)
807
+ ret = fut.wait()
808
+ return ret
809
+
810
+ with self.assertRaisesRegex(
811
+ RuntimeError, "attempted to get undefined function nonexisting_script"
812
+ ):
813
+ ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
814
+ dst_worker_name
815
+ )
816
+ self.assertEqual(ret, 0)
817
+
818
+
819
+ @torch.jit.ignore
820
+ def my_script_module_init(rank: int) -> MyModuleInterface:
821
+ return MyScriptModule(rank)
822
+
823
+
824
+ @torch.jit.script
825
+ def construct_my_script_module(rank: int) -> MyModuleInterface:
826
+ return my_script_module_init(rank)
827
+
828
+
829
+ @torch.jit.script
830
+ def run_ref_script_module(
831
+ ref_script_module: RRef[MyModuleInterface], t: Tensor
832
+ ) -> Tensor:
833
+ module = ref_script_module.to_here()
834
+ return module.forward() + t
835
+
836
+
837
+ @torch.jit.script
838
+ def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool:
839
+ return rref.confirmed_by_owner()
840
+
841
+
842
+ @torch.jit.script
843
+ def save_rref(rref_var: RRef[Tensor], fname: str) -> None:
844
+ torch.save(rref_var, fname)
845
+
846
+
847
+ @torch.jit.script
848
+ def script_add(x: Tensor, y: Tensor) -> Tensor:
849
+ return x + y
850
+
851
+
852
+ @rpc.functions.async_execution
853
+ @torch.jit.script
854
+ def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]:
855
+ return rpc.rpc_async(to, script_add, (x, y))
856
+
857
+
858
+ @rpc.functions.async_execution
859
+ @torch.jit.script
860
+ def async_wrong_type() -> Tensor:
861
+ return torch.zeros(2)
862
+
863
+
864
+ def load_script_module_with_pickled_rref(pickled_script_module):
865
+ f = io.BytesIO(pickled_script_module)
866
+ m = torch.jit.load(f)
867
+ return m()
868
+
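For reference, a minimal local sketch of the save/load round trip that the helper above performs on the callee side, using an in-memory buffer and no RPC or RRef pickling (the module is illustrative):

    import io

    import torch


    class Doubler(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x * 2


    scripted = torch.jit.script(Doubler())
    buf = io.BytesIO()
    torch.jit.save(scripted, buf)  # serialize the ScriptModule into the buffer
    buf.seek(0)
    loaded = torch.jit.load(buf)   # load it back, as the helper above does
    assert torch.equal(loaded(torch.ones(3)), torch.ones(3) * 2)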
869
+
870
+ class JitRpcTest(
871
+ RRefAPITest,
872
+ RRefTypingTest,
873
+ LocalRRefTest,
874
+ JitRpcOpTest,
875
+ FutureTypingTest,
876
+ RpcAgentTestFixture,
877
+ ):
878
+ @dist_init
879
+ def test_torchscript_function(self):
880
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
881
+ local_ret = one_arg(torch.ones(2, 2))
882
+ ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
883
+ self.assertEqual(ret, local_ret)
884
+ rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
885
+ self.assertEqual(rref.to_here(), local_ret)
886
+ # create rref to itself
887
+ local_rref = rpc.remote(
888
+ worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
889
+ )
890
+ self.assertEqual(local_rref.to_here(), local_ret)
891
+
892
+ @dist_init
893
+ def test_torchscript_function_exception(self):
894
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
895
+ with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
896
+ ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))
897
+
898
+ with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
899
+ rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20))
900
+
901
+ @dist_init
902
+ def test_torchscript_functions_not_supported(self):
903
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
904
+
905
+ my_local_script_module = MyScriptModule(self.rank)
906
+
907
+ # It is not thread safe to instantiate MyScriptModule in multiple threads.
909
+ # Wait for the local MyScriptModule instantiation to finish;
910
+ # otherwise it could be instantiated in parallel with the
911
+ # server thread below.
911
+ initialize_pg(self.file_init_method, self.rank, self.world_size)
912
+ dist.barrier()
913
+
914
+ # rpc_sync still accepts script class and run it in
915
+ # the same code path as python call.
916
+ ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))
917
+
918
+ # rpc_sync does not accept a script module method.
919
+ # Python 3.5 and Python 3.6 throw different error messages; the only
920
+ # common word that can be grepped is "pickle".
921
+ with self.assertRaisesRegex(TypeError, "pickle"):
922
+ ret = rpc.rpc_async(
923
+ dst_worker_name, my_local_script_module.forward, args=()
924
+ )
925
+
926
+ @dist_init
927
+ def test_remote_script_module(self):
928
+ # TODO: needs more investigation.
929
+ # There is an RRef leak when shutting down; we suspect it is because
930
+ # the RRef passed as an arg crosses the pybind boundary and is not
931
+ # garbage collected by Python when shutdown() is called.
932
+ import torch.distributed.rpc.api as api
933
+
934
+ api._ignore_rref_leak = True
935
+
936
+ local_ret = torch.ones(self.rank) + torch.ones(self.rank)
937
+
938
+ n = self.rank + 1
939
+ dst_rank = n % self.world_size
940
+ remote_ref = rpc.remote(
941
+ worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
942
+ )
943
+
944
+ # pass rref arg to owner
945
+ ret = rpc.rpc_sync(
946
+ worker_name(dst_rank),
947
+ run_ref_script_module,
948
+ args=(remote_ref, torch.ones(self.rank)),
949
+ )
950
+ self.assertEqual(ret, local_ret)
951
+
952
+ # pass rref arg to self/user
953
+ with self.assertRaisesRegex(
954
+ RuntimeError,
955
+ "is an RRef to a ScriptModule. It can't be sent through RPC from owner,",
956
+ ):
957
+ ret = rpc.rpc_sync(
958
+ worker_name(self.rank),
959
+ run_ref_script_module,
960
+ args=(remote_ref, torch.ones(self.rank)),
961
+ )
962
+
963
+ @dist_init
964
+ def test_create_script_module_on_remote(self):
965
+ dst_name = worker_name((self.rank + 1) % self.world_size)
966
+ # Construct on remote end with rpc_sync
967
+ created_script_module = rpc.rpc_sync(
968
+ dst_name, MyScriptModule, args=(self.rank,)
969
+ )
970
+ # Forward should output a ones tensor of self.rank.
971
+ self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule))
972
+ rank_ones_tensor = created_script_module()
973
+ self.assertEqual(torch.ones(self.rank), rank_ones_tensor)
974
+
975
+ # Construct ScriptModule with rpc.remote.
976
+ remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,))
977
+ # Verify it is an instance of ScriptModule on remote end.
978
+ remote_end_is_script = rpc.rpc_sync(
979
+ remote_script_module.owner(),
980
+ rref_isinstance,
981
+ args=(remote_script_module, torch.jit.ScriptModule),
982
+ )
983
+ self.assertTrue(remote_end_is_script)
984
+ # Run forward pass remotely.
985
+ remote_forward_output = remote_script_module.rpc_sync().forward()
986
+ self.assertEqual(remote_forward_output, torch.ones(self.rank))
987
+ # Run function defined on ScriptModule remotely.
988
+ remote_func_output = remote_script_module.rpc_sync().custom_func()
989
+ self.assertEqual(remote_func_output, torch.ones(self.rank))
990
+ # Ensure we can transfer ScriptModule RRef to this rank and run
991
+ # forward pass.
992
+ local_script_module = remote_script_module.to_here()
993
+ self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule))
994
+ rank_ones_tensor = local_script_module()
995
+ self.assertEqual(rank_ones_tensor, torch.ones(self.rank))
996
+ local_script_func_output = local_script_module.custom_func()
997
+ self.assertEqual(local_script_func_output, torch.ones(self.rank))
998
+
999
+ @dist_init
1000
+ def test_load_script_module_with_pickled_rref(self):
1001
+ dst_name = worker_name((self.rank + 1) % self.world_size)
1002
+ m1 = MyScriptModuleWithRRefs(dst_name)
1003
+ m2 = MyScriptModuleWithRRefs(dst_name)
1004
+
1005
+ f = io.BytesIO()
1006
+
1007
+ rpc._enable_jit_rref_pickle()
1008
+ torch.jit.save(m1, f)
1009
+ rpc._disable_jit_rref_pickle()
1010
+
1011
+ out1 = rpc.rpc_sync(
1012
+ dst_name,
1013
+ load_script_module_with_pickled_rref,
1014
+ args=(f.getvalue(),)
1015
+ )
1016
+ out2 = m2()
1017
+ self.assertEqual(out1, out2)
1018
+
1019
+ @dist_init
1020
+ def test_rref_jit_pickle_not_supported(self):
1021
+ n = self.rank + 1
1022
+ dst_rank = n % self.world_size
1023
+ rref_var = rpc_return_rref(worker_name(dst_rank))
1024
+ with TemporaryFileName() as fname:
1025
+ with self.assertRaisesRegex(
1026
+ RuntimeError, "RRef jit pickling is only allowed inside RPC calls"
1027
+ ):
1028
+ save_rref(rref_var, fname)
1029
+
1030
+ @dist_init
1031
+ def test_remote_script_throw(self):
1032
+ rref = rpc.remote(
1033
+ worker_name((self.rank + 1) % self.world_size),
1034
+ script_raise_func,
1035
+ args=(torch.ones(2),),
1036
+ )
1037
+ with self.assertRaisesRegex(Exception, ".*Expected error.*"):
1038
+ rref.to_here()
1039
+
1040
+ @dist_init
1041
+ def test_remote_script_udf(self):
1042
+ rref = rpc.remote(
1043
+ worker_name((self.rank + 1) % self.world_size),
1044
+ script_fork_wait_udf,
1045
+ args=(torch.ones(2),),
1046
+ )
1047
+ self.assertEqual(rref.to_here(), torch.ones(2) * 2)
1048
+
1049
+ @dist_init
1050
+ def test_async_script_udf(self):
1051
+ future = rpc.rpc_async(
1052
+ worker_name((self.rank + 1) % self.world_size),
1053
+ script_fork_wait_udf,
1054
+ args=(torch.ones(2),),
1055
+ )
1056
+ self.assertEqual(future.wait(), torch.ones(2) * 2)
1057
+
1058
+ @dist_init
1059
+ def test_callback_simple(self):
1060
+ def callback(fut):
1061
+ return fut.wait() + 1
1062
+
1063
+ future = rpc.rpc_async(
1064
+ worker_name((self.rank + 1) % self.world_size),
1065
+ script_fork_wait_udf,
1066
+ args=(torch.ones(2),),
1067
+ ).then(callback)
1068
+ self.assertEqual(future.wait(), torch.ones(2) * 2 + 1)
1069
+
1070
+ @dist_init
1071
+ def test_callback_chain(self):
1072
+ n = self.rank + 1
1073
+ dst = worker_name(n % self.world_size)
1074
+
1075
+ def callback(fut):
1076
+ return fut.wait() + 1
1077
+
1078
+ fut = rpc.rpc_async(
1079
+ worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),)
1080
+ )
1081
+
1082
+ num_cbs = 20
1083
+ for _ in range(num_cbs):
1084
+ fut = fut.then(callback)
1085
+
1086
+ self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
1087
+
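For reference, the same `then` chaining works on a local torch.futures.Future with no RPC involved; a minimal sketch:

    import torch
    from torch.futures import Future

    fut: Future = Future()
    chained = fut
    for _ in range(3):
        # Each `then` returns a new Future carrying the callback's return value.
        chained = chained.then(lambda f: f.wait() + 1)

    fut.set_result(torch.ones(2, 2))
    assert torch.equal(chained.wait(), torch.ones(2, 2) + 3)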
1088
+ @dist_init
1089
+ def test_add_done_callback(self):
1090
+ callback_called = None
1091
+
1092
+ def callback(fut):
1093
+ nonlocal callback_called
1094
+ callback_called = fut.wait() * 2
1095
+
1096
+ future = rpc.rpc_async(
1097
+ worker_name((self.rank + 1) % self.world_size),
1098
+ script_fork_wait_udf,
1099
+ args=(torch.ones(2),),
1100
+ )
1101
+
1102
+ future.add_done_callback(callback)
1103
+ future_then = future.then(lambda _: True)
1104
+
1105
+ self.assertEqual(future.wait(), torch.ones(2) * 2)
1106
+
1107
+ # We have no guarantee that the add_done_callback fn will execute before the test finishes.
1108
+ # Add a 'then' callback that runs afterwards so we are guaranteed to wait for the first callback.
1109
+ future_then.wait()
1110
+ self.assertEqual(callback_called, torch.ones(2) * 4)
1111
+
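The then/add_done_callback distinction can also be seen on a local Future; a minimal sketch, assuming (as the test above does) that callbacks run in registration order:

    import torch
    from torch.futures import Future

    seen = []

    fut: Future = Future()
    fut.add_done_callback(lambda f: seen.append(f.wait() * 2))  # returns nothing
    barrier = fut.then(lambda f: True)  # registered after, gives us a handle to wait on

    fut.set_result(torch.ones(2))
    barrier.wait()
    assert torch.equal(seen[0], torch.ones(2) * 2)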
1112
+ @dist_init
1113
+ def test_async_script_throw(self):
1114
+ future = rpc.rpc_async(
1115
+ worker_name((self.rank + 1) % self.world_size),
1116
+ script_fork_wait_throw,
1117
+ args=(torch.ones(2),),
1118
+ )
1119
+ with self.assertRaisesRegex(Exception, ".*Expected error.*"):
1120
+ future.wait()
1121
+
1122
+ @dist_init
1123
+ def test_callback_with_exception(self):
1124
+ def callback(fut):
1125
+ with self.assertRaisesRegex(Exception, ".*Expected error.*"):
1126
+ fut.wait()
1127
+ raise RuntimeError("Another expected error")
1128
+
1129
+ future = rpc.rpc_async(
1130
+ worker_name((self.rank + 1) % self.world_size),
1131
+ script_fork_wait_throw,
1132
+ args=(torch.ones(2),),
1133
+ ).then(callback)
1134
+
1135
+ with self.assertRaisesRegex(RuntimeError, "Another expected error"):
1136
+ future.wait()
1137
+
1138
+ @dist_init
1139
+ def test_call_rpc_with_profiling(self):
1140
+ # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
1141
+ # future from within a script function that calls rpc_async
1142
+ if self.rank == 0:
1143
+ with _profile() as prof:
1144
+ prof_key = _build_rpc_profiling_key(
1145
+ RPCExecMode.ASYNC,
1146
+ torch._jit_internal._qualified_name(one_arg),
1147
+ "worker0",
1148
+ "worker1",
1149
+ )
1150
+ with torch.autograd.profiler.record_function(prof_key) as rf:
1151
+ ret = call_rpc_with_profiling(rf.record, "worker1")
1152
+ # TODO: Can't get a reliable time for this profiling event since
1153
+ # it's hard to estimate the execution time on the remote end for non-UDFs.
1154
+ # This can be resolved by https://github.com/pytorch/pytorch/issues/36272.
1155
+ # After that, this test should be modified to validate the function time.
1156
+ events = prof.function_events
1157
+ function_event = get_function_event(events, prof_key)
1158
+ self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name)
1159
+
1160
+ @dist_init
1161
+ def test_rpc_async_jit_profiled(self):
1162
+ # Tests that rpc_async calls made from within a TorchScript function are
1163
+ # profiled.
1164
+ if self.rank == 0:
1165
+ dst_rank = (self.rank + 1) % self.world_size
1166
+ dst_worker_name = worker_name(dst_rank)
1167
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
1168
+ kwargs = {}
1169
+ with _profile() as prof:
1170
+ script_rpc_async_call(
1171
+ dst_worker_name, args, kwargs
1172
+ )
1173
+
1174
+ # Ensure rpc_async call is profiled
1175
+ function_events = prof.function_events
1176
+ qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs)
1177
+ rpc_async_jit_event = [
1178
+ event
1179
+ for event in function_events
1180
+ if qual_name in event.name and event.node_id == self.rank
1181
+ ]
1182
+ self.assertEqual(len(rpc_async_jit_event), 1)
1183
+ rpc_async_jit_event = rpc_async_jit_event[0]
1184
+ profiled_name = _build_rpc_profiling_key(
1185
+ RPCExecMode.ASYNC_JIT,
1186
+ qual_name,
1187
+ worker_name(self.rank),
1188
+ dst_worker_name,
1189
+ )
1190
+ self.assertEqual(profiled_name, rpc_async_jit_event.name)
1191
+ remote_events = [event for event in function_events if event.is_remote]
1192
+ # All remote events should have taken place on dst_rank
1193
+ remote_event_node_ids = {
1194
+ remote_event.node_id for remote_event in remote_events
1195
+ }
1196
+ self.assertEqual(remote_event_node_ids, {dst_rank})
1197
+ # script_rpc_async_call invokes add operator
1198
+ # so we should see this as a remote event.
1199
+ remote_add = next(
1200
+ remote_event
1201
+ for remote_event in remote_events
1202
+ if "aten::add" in remote_event.name
1203
+ )
1204
+ remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add"
1205
+ self.assertEqual(remote_add.name, remote_add_profiled_name)
1206
+
1207
+ @dist_init
1208
+ def test_record_function_on_caller_rpc_async(self):
1209
+ if self.rank == 0:
1210
+ dst_rank = (self.rank + 1) % self.world_size
1211
+ dst_worker_name = worker_name(dst_rank)
1212
+ block_scope = "foo"
1213
+ with _profile() as prof:
1214
+ # Runs 2 rpc_async calls within JIT under record_function.
1215
+ record_function_on_caller_rpc_async(dst_worker_name, block_scope)
1216
+
1217
+ # Ensure record_function event is profiled.
1218
+ function_events = prof.function_events
1219
+ record_function_scope_event = [
1220
+ event for event in function_events if event.name == block_scope
1221
+ ]
1222
+ self.assertEqual(1, len(record_function_scope_event))
1223
+ record_function_scope_event = record_function_scope_event[0]
1224
+ # Ensure RPC future is profiled.
1225
+ expected_key = _build_rpc_profiling_key(
1226
+ RPCExecMode.ASYNC_JIT,
1227
+ torch._jit_internal._qualified_name(script_add_ones),
1228
+ worker_name(self.rank),
1229
+ dst_worker_name,
1230
+ )
1231
+ jit_rpc_events = [
1232
+ event for event in function_events if event.name == expected_key
1233
+ ]
1234
+ self.assertEqual(2, len(jit_rpc_events))
1235
+ # Validate that the record_function scope time is greater than both
1236
+ # of the individual RPC async call times. The reason it is not necessarily
1237
+ # greater than the sum is because the two can execute in parallel.
1238
+ for jit_rpc_event in jit_rpc_events:
1239
+ self.assertTrue(
1240
+ record_function_scope_event.cpu_time_total
1241
+ > jit_rpc_event.cpu_time_total
1242
+ )
1243
+
1244
+ @dist_init
1245
+ def test_rpc_torchscript_record_function(self):
1246
+ # tests that torchscript functions can be profiled using with
1247
+ # record_function(...) over RPC.
1248
+ REMOTE_OP_STR = "#remote_op: "
1249
+ if self.rank == 0:
1250
+ dst_rank = (self.rank + 1) % self.world_size
1251
+ dst_worker_name = worker_name(dst_rank)
1252
+ block_scope = "foo"
1253
+ with _profile() as prof:
1254
+ call_rpc_torchscript_with_record_function(dst_worker_name, block_scope)
1255
+
1256
+ # Need to call below to populate CPU children.
1257
+ prof.key_averages()
1258
+ function_events = prof.function_events
1259
+ expected_key = (
1260
+ _build_rpc_profiling_key(
1261
+ RPCExecMode.ASYNC_JIT,
1262
+ torch._jit_internal._qualified_name(
1263
+ script_add_ones_with_record_function
1264
+ ),
1265
+ worker_name(self.rank),
1266
+ dst_worker_name,
1267
+ )
1268
+ + REMOTE_OP_STR
1269
+ + block_scope
1270
+ )
1271
+ remote_record_function_event = next(
1272
+ evt for evt in function_events if evt.name == expected_key
1273
+ )
1274
+ self.assertTrue(block_scope in remote_record_function_event.name)
1275
+ remote_children = remote_record_function_event.cpu_children
1276
+ self.assertTrue(any("aten::add" in child.name for child in remote_children))
1277
+
1278
+ def test_record_function_jit_end_callbacks_with_fork(self):
1279
+ # Ensures that we can call rf._call_end_callbacks_on_future on a jit
1280
+ # future in python eager mode with torch.jit.fork
1281
+ sleep_interval = 1
1282
+ with _profile() as prof:
1283
+ with torch.autograd.profiler.record_function("foo") as rf:
1284
+ fut = torch.jit._fork(sleep, sleep_interval)
1285
+ rf._call_end_callbacks_on_future(fut)
1286
+ fut.wait()
1287
+
1288
+ function_events = prof.function_events
1289
+ sleep_event = get_function_event(function_events, "foo")
1290
+ self.assertEqual(sleep_event.name, "foo")
1291
+ # Validate that callbacks were fired at the right time by checking the
1292
+ # profiling event cpu time
1293
+ self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)
1294
+
1295
+ def test_call_fork_in_jit_with_profiling(self):
1296
+ # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
1297
+ # future from within a script function with torch.jit.fork
1298
+ with _profile() as prof:
1299
+ with torch.autograd.profiler.record_function("foo") as rf:
1300
+ ret = call_fork_with_profiling(rf.record)
1301
+
1302
+ events = prof.function_events
1303
+ function_event = get_function_event(events, "foo")
1304
+ self.assertEqual(function_event.name, "foo")
1305
+
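For reference, a minimal local sketch of profiling a forked TorchScript task under a record_function scope, without RPC (the scope name is illustrative):

    import torch


    @torch.jit.script
    def double(x: torch.Tensor) -> torch.Tensor:
        return x * 2


    with torch.autograd.profiler.profile() as prof:
        with torch.autograd.profiler.record_function("my_scope"):
            fut = torch.jit.fork(double, torch.ones(2))
            result = torch.jit.wait(fut)

    assert torch.equal(result, torch.ones(2) * 2)
    assert any(evt.name == "my_scope" for evt in prof.function_events)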
1306
+ @dist_init
1307
+ def test_async_function_simple(self):
1308
+ dst1 = worker_name((self.rank + 1) % self.world_size)
1309
+ dst2 = worker_name((self.rank + 2) % self.world_size)
1310
+
1311
+ ret = rpc.rpc_sync(
1312
+ dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
1313
+ )
1314
+ self.assertEqual(ret, torch.ones(2, 2) + 1)
1315
+
1316
+ @dist_init
1317
+ def test_async_function_wrong_return_type(self):
1318
+ with self.assertRaisesRegex(
1319
+ RuntimeError,
1320
+ "Async functions must return an IValue of Future type, but got Tensor",
1321
+ ):
1322
+ rpc.rpc_sync(
1323
+ worker_name((self.rank + 1) % self.world_size), async_wrong_type
1324
+ )
1325
+
1326
+ @dist_init
1327
+ def test_async_function_wrong_decorator_order(self):
1328
+ # @torch.jit.script complains about undefined value rpc. Error is shown
1329
+ # below. The reason for not checking error string is to avoid making
1330
+ # JIT error handling code depend on RPC tests, as we don't have any
1331
+ # restrictions on the error message here.
1332
+ #
1333
+ # RuntimeError:
1334
+ # undefined value rpc:
1335
+ # def async_wrong_decorator_order(to, x, y):
1336
+ # # type: (str, Tensor, Tensor) -> Future[Tensor]
1337
+ # return rpc.rpc_async(to, script_add, (x, y))
1338
+ # ~~~ <--- HERE
1339
+ with self.assertRaises(RuntimeError):
1340
+
1341
+ @torch.jit.script
1342
+ @rpc.functions.async_execution
1343
+ def async_wrong_decorator_order(
1344
+ to: str, x: Tensor, y: Tensor
1345
+ ) -> Future[Tensor]:
1346
+ return rpc.rpc_async(to, script_add, (x, y))
1347
+
1348
+ @dist_init
1349
+ def test_async_function_remote(self):
1350
+ dst1 = worker_name((self.rank + 1) % self.world_size)
1351
+ dst2 = worker_name((self.rank + 2) % self.world_size)
1352
+
1353
+ rref = rpc.remote(
1354
+ dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
1355
+ )
1356
+ self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
1357
+
1358
+ @dist_init
1359
+ def test_async_function_remote_multi(self):
1360
+ dst1 = worker_name((self.rank + 1) % self.world_size)
1361
+ dst2 = worker_name((self.rank + 2) % self.world_size)
1362
+
1363
+ num = 20
1364
+ rrefs = []
1365
+ for i in range(num):
1366
+ rrefs.append(
1367
+ rpc.remote(
1368
+ dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i)
1369
+ )
1370
+ )
1371
+
1372
+ for i in range(num):
1373
+ self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i)
1374
+
1375
+ @dist_init
1376
+ def test_async_function_wrong_return_type_remote(self):
1377
+ rref = rpc.remote(
1378
+ worker_name((self.rank + 1) % self.world_size), async_wrong_type
1379
+ )
1380
+
1381
+ with self.assertRaisesRegex(
1382
+ RuntimeError,
1383
+ "Async functions must return an IValue of Future type, but got Tensor",
1384
+ ):
1385
+ rref.to_here()
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py ADDED
@@ -0,0 +1,218 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ from typing import Dict, Tuple
4
+
5
+ import torch
6
+ import torch.distributed.rpc as rpc
7
+ from torch import Tensor
8
+ from torch.distributed.rpc import RRef
9
+ from torch.testing._internal.dist_utils import (
10
+ dist_init,
11
+ worker_name,
12
+ wait_until_pending_futures_and_users_flushed
13
+ )
14
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
15
+ RpcAgentTestFixture,
16
+ )
17
+
18
+
19
+ @torch.jit.script
20
+ def two_args_two_kwargs(
21
+ first_arg,
22
+ second_arg,
23
+ first_kwarg=torch.tensor([3, 3]),
24
+ second_kwarg=torch.tensor([4, 4]),
25
+ ):
26
+ return first_arg + second_arg + first_kwarg + second_kwarg
27
+
28
+
29
+ @torch.jit.script
30
+ def script_rpc_async_call(
31
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
32
+ ):
33
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
34
+ ret = fut.wait()
35
+ return ret
36
+
37
+
38
+ @torch.jit.script
39
+ def rpc_async_call_with_timeout(
40
+ dst_worker_name: str,
41
+ args: Tuple[Tensor, Tensor],
42
+ kwargs: Dict[str, Tensor],
43
+ timeout: float,
44
+ ):
45
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
46
+ ret = fut.wait()
47
+ return ret
48
+
49
+
50
+ @torch.jit.script
51
+ def rpc_async_call_with_timeout_future_ret(
52
+ dst_worker_name: str,
53
+ args: Tuple[Tensor, Tensor],
54
+ kwargs: Dict[str, Tensor],
55
+ timeout: float,
56
+ ):
57
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
58
+ return fut
59
+
60
+
61
+ @torch.jit.script
62
+ def rpc_async_call_future_ret(
63
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
64
+ ):
65
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
66
+ return fut
67
+
68
+ @torch.jit.script
69
+ def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
70
+ return rref_var.to_here()
71
+
72
+ @torch.jit.script
73
+ def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor:
74
+ return rref_var.to_here(timeout)
75
+
76
+ @torch.jit.script
77
+ def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor:
78
+ fut = rpc.rpc_async(dst_worker_name, rref_to_here, args)
79
+ ret = fut.wait()
80
+ return ret
81
+
82
+
83
+ class JitFaultyAgentRpcTest(RpcAgentTestFixture):
84
+ """
85
+ Run tests for rpc_async in JIT under the faulty agent test fixture to test
86
+ arbitrary timeouts.
87
+ """
88
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
89
+ def test_timeout_in_torchscript_function(self):
90
+ # Call rpc_async + fut.wait() in torchscript function and ensure that
91
+ # timeout is raised.
92
+ if self.rank != 0:
93
+ return
94
+
95
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
96
+
97
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
98
+ kwargs = {
99
+ "first_kwarg": torch.tensor([2, 2]),
100
+ "second_kwarg": torch.tensor([3, 3]),
101
+ }
102
+ expected_error = self.get_timeout_error_regex()
103
+ # Ensure that we get a timeout if we override the default timeout and
104
+ # the RPC takes longer to execute.
105
+ with self.assertRaisesRegex(RuntimeError, expected_error):
106
+ rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
107
+
108
+ # Ensure that we timeout if we don't specify a timeout but the default
109
+ # is less than the RPC takes to execute.
110
+ rpc._set_rpc_timeout(0.001)
111
+ with self.assertRaisesRegex(RuntimeError, expected_error):
112
+ script_rpc_async_call(
113
+ dst_worker_name, args, kwargs
114
+ )
115
+
116
+ # Ensure that we run to completion if zero timeout is specified.
117
+ ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
118
+ self.assertEqual(ret, torch.tensor([8, 8]))
119
+ # reset for clean shutdown
120
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
121
+
122
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
123
+ def test_timeout_in_python(self):
124
+ # Ensures timeouts are raised if we call rpc_async from within a
125
+ # torchscript function, but wait on the future in python.
126
+ if self.rank != 0:
127
+ return
128
+
129
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
130
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
131
+ kwargs = {
132
+ "first_kwarg": torch.tensor([2, 2]),
133
+ "second_kwarg": torch.tensor([3, 3]),
134
+ }
135
+ expected_error = self.get_timeout_error_regex()
136
+
137
+ fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
138
+ with self.assertRaisesRegex(RuntimeError, expected_error):
139
+ fut.wait()
140
+
141
+ # Ensure we time out if we don't specify a timeout but the default is
142
+ # less than the time the RPC takes to execute.
143
+ rpc._set_rpc_timeout(0.001)
144
+ fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
145
+ with self.assertRaisesRegex(RuntimeError, expected_error):
146
+ fut.wait()
147
+
148
+ # Ensure run to completion if zero timeout is specified
149
+ fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
150
+ result = fut.wait()
151
+ self.assertEqual(result, torch.tensor([8, 8]))
152
+ # reset for clean shutdown
153
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
154
+
155
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
156
+ def test_remote_timeout_to_here_in_jit(self):
157
+ # Test that calling to_here() in JIT will raise timeout error if
158
+ # rpc.remote failed.
159
+ if self.rank != 0:
160
+ return
161
+ dst_rank = (self.rank + 1) % self.world_size
162
+ dst_worker = f"worker{dst_rank}"
163
+ rref = rpc.remote(
164
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
165
+ )
166
+ # Will ensure error handling callbacks are run.
167
+ wait_until_pending_futures_and_users_flushed()
168
+ # Call to_here() within a ScriptFunction and ensure it raises
169
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
170
+ rref_to_here(rref)
171
+
172
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
173
+ def test_rref_to_here_timeout_in_jit(self):
174
+ if self.rank != 0:
175
+ return
176
+
177
+ dst_rank = (self.rank + 1) % self.world_size
178
+ dst_worker = f"worker{dst_rank}"
179
+ rref = rpc.remote(
180
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
181
+ )
182
+ expected_error = self.get_timeout_error_regex()
183
+ with self.assertRaisesRegex(RuntimeError, expected_error):
184
+ rref_to_here_with_timeout(rref, 0.01)
185
+
186
+ rref_to_here_with_timeout(rref, 100)
187
+
188
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
189
+ def test_rref_timeout_pickle_in_jit(self):
190
+ if self.rank != 0:
191
+ return
192
+ dst_rank = (self.rank + 1) % self.world_size
193
+ dst_worker = f"worker{dst_rank}"
194
+ rref = rpc.remote(
195
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
196
+ )
197
+ # Will ensure error handling callbacks are run.
198
+ wait_until_pending_futures_and_users_flushed()
199
+ # Call RPC with RRef arg in JIT, which will go through JIT pickling and
200
+ # ensure error is raised.
201
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
202
+ rpc_async_with_rref_arg(dst_worker, (rref, ))
203
+
204
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
205
+ def test_rref_timeout_pickle_script_func(self):
206
+ # Similar to above test, but calls python rpc with script function.
207
+ if self.rank != 0:
208
+ return
209
+ dst_rank = (self.rank + 1) % self.world_size
210
+ dst_worker = f"worker{dst_rank}"
211
+ rref = rpc.remote(
212
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
213
+ )
214
+ # Will ensure error handling callbacks are run.
215
+ wait_until_pending_futures_and_users_flushed()
216
+ # Call RPC with script function that takes RRef, ensure timeout during pickling
217
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
218
+ rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, ))
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,63 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import os
4
+ from abc import ABC, abstractmethod
5
+
6
+ import torch.testing._internal.dist_utils
7
+
8
+
9
+ class RpcAgentTestFixture(ABC):
10
+ @property
11
+ def world_size(self) -> int:
12
+ return 4
13
+
14
+ @property
15
+ def init_method(self):
16
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
17
+ if use_tcp_init == "1":
18
+ master_addr = os.environ["MASTER_ADDR"]
19
+ master_port = os.environ["MASTER_PORT"]
20
+ return f"tcp://{master_addr}:{master_port}"
21
+ else:
22
+ return self.file_init_method
23
+
24
+ @property
25
+ def file_init_method(self):
26
+ return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
27
+ file_name=self.file_name
28
+ )
29
+
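Both properties above just build an init_method URL (tcp:// or file://); for reference, a hedged sketch of how such a URL is typically consumed when constructing backend options (addresses and paths are placeholders):

    import torch.distributed.rpc as rpc

    tcp_init = "tcp://127.0.0.1:29500"       # TCP-based rendezvous
    file_init = "file:///tmp/rpc_init_file"  # file-based rendezvous

    options = rpc.TensorPipeRpcBackendOptions(init_method=tcp_init)
    # rpc.init_rpc("worker0", rank=0, world_size=1, rpc_backend_options=options)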
30
+ @property
31
+ @abstractmethod
32
+ def rpc_backend(self):
33
+ pass
34
+
35
+ @property
36
+ @abstractmethod
37
+ def rpc_backend_options(self):
38
+ pass
39
+
40
+ def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027
41
+ """Method used by dist_init to prepare the faulty agent.
42
+
43
+ Does nothing for other agents.
44
+ """
45
+
46
+ # Shutdown sequence is not well defined, so we may see any of the following
47
+ # errors when running tests that simulate errors via a shutdown on the
48
+ # remote end.
49
+ @abstractmethod
50
+ def get_shutdown_error_regex(self):
51
+ """
52
+ Return various error message we may see from RPC agents while running
53
+ tests that check for failures. This function is used to match against
54
+ possible errors to ensure failures were raised properly.
55
+ """
56
+
57
+ @abstractmethod
58
+ def get_timeout_error_regex(self):
59
+ """
60
+ Returns a partial string indicating the error we should receive when an
61
+ RPC has timed out. Useful for use with assertRaisesRegex() to ensure we
62
+ have the right errors during timeout.
63
+ """
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,34 @@
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed.rpc as rpc
4
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
5
+ RpcAgentTestFixture,
6
+ )
7
+ from torch.testing._internal.common_distributed import (
8
+ tp_transports,
9
+ )
10
+
11
+
12
+ class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
13
+ @property
14
+ def rpc_backend(self):
15
+ return rpc.backend_registry.BackendType[
16
+ "TENSORPIPE"
17
+ ]
18
+
19
+ @property
20
+ def rpc_backend_options(self):
21
+ return rpc.backend_registry.construct_rpc_backend_options(
22
+ self.rpc_backend,
23
+ init_method=self.init_method,
24
+ _transports=tp_transports()
25
+ )
26
+
27
+ def get_shutdown_error_regex(self):
28
+ # FIXME Once we consolidate the error messages returned by the
29
+ # TensorPipe agent put some more specific regex here.
30
+ error_regexes = [".*"]
31
+ return "|".join([f"({error_str})" for error_str in error_regexes])
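For reference, a small sketch of the regex-join pattern used above: candidate error strings are OR'ed into one pattern so that any of them satisfies an assertRaisesRegex-style match (the sample strings are made up):

    import re

    error_regexes = ["connection closed", "timed out", "EOF"]
    combined = "|".join([f"({error_str})" for error_str in error_regexes])
    assert re.search(combined, "RPC timed out while waiting") is not None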
32
+
33
+ def get_timeout_error_regex(self):
34
+ return "RPC ran for more than"