cyd0806 committed on
Commit
bacbb4b
·
verified ·
1 Parent(s): 4ed3182

Upload apex-master/tests/L0/run_transformer/test_parallel_state.py with huggingface_hub

Browse files
apex-master/tests/L0/run_transformer/test_parallel_state.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+
4
+ from torch.testing._internal import common_utils
5
+
6
+ logging.getLogger("torch").setLevel(logging.WARNING)
7
+
8
+ from apex.transformer import parallel_state
9
+ from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
10
+ from apex.transformer.testing.distributed_test_base import UccDistributedTestBase
11
+
12
+ logging.getLogger("apex").setLevel(logging.WARNING)
13
+
14
+
15
+ os.environ["BACKEND"] = "NCCL"
16
+ DATA_PARALLEL_WORLD_SIZE: int = 1
17
+
18
+
19
+ def calc_expected_tensor_model_paralell_rank(
20
+ rank: int, tensor_model_parallel_world_size: int,
21
+ ) -> int:
22
+ return rank % tensor_model_parallel_world_size
23
+
24
+
25
+ class ParallelStateTestBase:
26
+ def test_initialize_model_parallel(self) -> None:
27
+
28
+ self.assertFalse(parallel_state.model_parallel_is_initialized())
29
+
30
+ for tensor_model_parallel_world_size in range(1, self.world_size + 1):
31
+ msg = f"tensor_model_parallel_world_siz: {tensor_model_parallel_world_size}"
32
+ if self.world_size % tensor_model_parallel_world_size:
33
+ continue
34
+
35
+ pipeline_model_parallel_world_size = (
36
+ self.world_size // tensor_model_parallel_world_size
37
+ )
38
+
39
+ parallel_state.initialize_model_parallel(
40
+ tensor_model_parallel_size_=tensor_model_parallel_world_size,
41
+ pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
42
+ )
43
+ self.assertEqual(
44
+ tensor_model_parallel_world_size,
45
+ parallel_state.get_tensor_model_parallel_world_size(),
46
+ msg=msg,
47
+ )
48
+ expected_tensor_model_parallel_rank = calc_expected_tensor_model_paralell_rank(
49
+ self.rank, tensor_model_parallel_world_size
50
+ )
51
+ self.assertEqual(
52
+ expected_tensor_model_parallel_rank,
53
+ parallel_state.get_tensor_model_parallel_rank(),
54
+ msg=msg,
55
+ )
56
+
57
+ expected_tensor_model_parallel_src_rank = (
58
+ self.rank // tensor_model_parallel_world_size
59
+ ) * tensor_model_parallel_world_size
60
+ self.assertEqual(
61
+ expected_tensor_model_parallel_src_rank,
62
+ parallel_state.get_tensor_model_parallel_src_rank(),
63
+ msg=msg,
64
+ )
65
+
66
+ parallel_state.destroy_model_parallel()
67
+ self.assertFalse(parallel_state.model_parallel_is_initialized(), msg=msg)
68
+
69
+ def test_initialize_model_parallel_with_virtual_and_split(self) -> None:
70
+ if self.world_size < 4:
71
+ self.skipTest("requires >= 4 GPUs")
72
+ self.assertFalse(parallel_state.model_parallel_is_initialized())
73
+
74
+ tensor_model_parallel_world_size = 1 + int(self.world_size > 4)
75
+ pipeline_model_parallel_world_size = (
76
+ self.world_size // tensor_model_parallel_world_size
77
+ )
78
+ virtual_pipeline_model_parallel_world_size = 2
79
+ pipeline_model_parallel_split_rank = pipeline_model_parallel_world_size // 2
80
+
81
+ parallel_state.initialize_model_parallel(
82
+ tensor_model_parallel_size_=tensor_model_parallel_world_size,
83
+ pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
84
+ virtual_pipeline_model_parallel_size_=virtual_pipeline_model_parallel_world_size,
85
+ pipeline_model_parallel_split_rank_=pipeline_model_parallel_split_rank,
86
+ )
87
+ self.assertEqual(
88
+ calc_expected_tensor_model_paralell_rank(
89
+ self.rank, tensor_model_parallel_world_size
90
+ ),
91
+ parallel_state.get_tensor_model_parallel_rank(),
92
+ )
93
+ self.assertEqual(
94
+ pipeline_model_parallel_world_size,
95
+ parallel_state.get_pipeline_model_parallel_world_size(),
96
+ )
97
+ self.assertEqual(
98
+ virtual_pipeline_model_parallel_world_size,
99
+ parallel_state.get_virtual_pipeline_model_parallel_world_size(),
100
+ )
101
+
102
+ expected_pipeline_rank = (
103
+ self.rank - (self.rank % tensor_model_parallel_world_size)
104
+ ) % pipeline_model_parallel_world_size
105
+ self.assertEqual(
106
+ expected_pipeline_rank, parallel_state.get_pipeline_model_parallel_rank(),
107
+ )
108
+ # virtual pipeline model parallel rank is lazily set, i.e., right after the call of
109
+ # `initialize_model_parallel`, it's set to 0.
110
+ self.assertEqual(
111
+ 0, parallel_state.get_virtual_pipeline_model_parallel_rank(),
112
+ )
113
+ self.assertEqual(
114
+ pipeline_model_parallel_split_rank,
115
+ parallel_state.get_pipeline_model_parallel_split_rank(),
116
+ )
117
+
118
+ fake_split_rank = 77
119
+ parallel_state.set_pipeline_model_parallel_split_rank(fake_split_rank)
120
+ self.assertEqual(
121
+ fake_split_rank, parallel_state.get_pipeline_model_parallel_split_rank()
122
+ )
123
+
124
+ # relative position embedding groups check
125
+ self.assertEqual(
126
+ expected_pipeline_rank < pipeline_model_parallel_split_rank,
127
+ parallel_state.is_rank_in_encoder_relative_position_embedding_group(),
128
+ )
129
+ self.assertEqual(
130
+ expected_pipeline_rank >= pipeline_model_parallel_split_rank,
131
+ parallel_state.is_rank_in_decoder_relative_position_embedding_group(),
132
+ )
133
+
134
+ parallel_state.destroy_model_parallel()
135
+
136
+ def test_initialize_model_parallel_decoder_only(self) -> None:
137
+ """Initialize model parallelism for decoder-only Transformers like GPT-3"""
138
+
139
+ self.assertFalse(parallel_state.model_parallel_is_initialized())
140
+
141
+ for tensor_model_parallel_world_size in range(1, self.world_size + 1):
142
+ msg = f"tensor_model_parallel_world_size: {tensor_model_parallel_world_size}"
143
+ if self.world_size % tensor_model_parallel_world_size:
144
+ continue
145
+
146
+ pipeline_model_parallel_world_size = (
147
+ self.world_size // tensor_model_parallel_world_size
148
+ )
149
+
150
+ parallel_state.initialize_model_parallel(
151
+ tensor_model_parallel_size_=tensor_model_parallel_world_size,
152
+ pipeline_model_parallel_size_=pipeline_model_parallel_world_size,
153
+ pipeline_model_parallel_split_rank_=0,
154
+ )
155
+ self.assertEqual(
156
+ tensor_model_parallel_world_size,
157
+ parallel_state.get_tensor_model_parallel_world_size(),
158
+ msg=msg,
159
+ )
160
+ expected_tensor_model_parallel_rank = calc_expected_tensor_model_paralell_rank(
161
+ self.rank, tensor_model_parallel_world_size
162
+ )
163
+ self.assertEqual(
164
+ expected_tensor_model_parallel_rank,
165
+ parallel_state.get_tensor_model_parallel_rank(),
166
+ msg=msg,
167
+ )
168
+
169
+ expected_tensor_model_parallel_src_rank = (
170
+ self.rank // tensor_model_parallel_world_size
171
+ ) * tensor_model_parallel_world_size
172
+ self.assertEqual(
173
+ expected_tensor_model_parallel_src_rank,
174
+ parallel_state.get_tensor_model_parallel_src_rank(),
175
+ msg=msg,
176
+ )
177
+
178
+ parallel_state.destroy_model_parallel()
179
+ self.assertFalse(parallel_state.model_parallel_is_initialized(), msg=msg)
180
+
181
+
182
+ class NcclParallelStateTest(ParallelStateTestBase, NcclDistributedTestBase): pass
183
+ class UccParallelStateTest(ParallelStateTestBase, UccDistributedTestBase): pass
184
+
185
+
186
+ if __name__ == "__main__":
187
+ common_utils.run_tests()