Spaces: Runtime error
Upload 2 files
- app.py +274 -0
- descriptions.py +24 -0

app.py
ADDED
@@ -0,0 +1,274 @@
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from descriptions import basic_texts, descriptions

def generate_data_parallel_groups(world_size, tensor_model_parallel_size, pipeline_model_parallel_size, context_parallel_size):
    """
    Generate data parallel groups based on the provided parallelism parameters.
    """
    assert world_size % (pipeline_model_parallel_size * tensor_model_parallel_size * context_parallel_size) == 0, "world_size must be divisible by the product of pipeline_model_parallel_size, tensor_model_parallel_size, and context_parallel_size"
    data_parallel_group_ranks = []
    num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size

    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(context_parallel_size * tensor_model_parallel_size):
            ranks = range(
                start_rank + j, end_rank, context_parallel_size * tensor_model_parallel_size
            )
            data_parallel_group_ranks.append(list(ranks))
    return data_parallel_group_ranks
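# Worked example (illustrative; computed by hand, not executed by the app):
# generate_data_parallel_groups(16, 2, 2, 1) returns
#   [[0, 2, 4, 6], [1, 3, 5, 7], [8, 10, 12, 14], [9, 11, 13, 15]]
# i.e. with TP=2 and PP=2, ranks holding replicas of the same model shard
# form one data parallel group of size 16 // (2 * 2 * 1) = 4.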

def generate_context_data_parallel_groups(world_size, tensor_model_parallel_size, pipeline_model_parallel_size, context_parallel_size):
    """
    Generate data parallel groups considering context parallelism.
    """
    assert world_size % (pipeline_model_parallel_size * tensor_model_parallel_size * context_parallel_size) == 0, "world_size must be divisible by the product of pipeline_model_parallel_size, tensor_model_parallel_size, and context_parallel_size"
    all_data_parallel_group_ranks_with_cp = []
    num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size

    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(tensor_model_parallel_size):
            ranks_with_cp = range(start_rank + j, end_rank, tensor_model_parallel_size)
            all_data_parallel_group_ranks_with_cp.append(list(ranks_with_cp))

    return all_data_parallel_group_ranks_with_cp
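# Worked example (illustrative; computed by hand):
# generate_context_data_parallel_groups(16, 2, 2, 2) returns
#   [[0, 2, 4, 6], [1, 3, 5, 7], [8, 10, 12, 14], [9, 11, 13, 15]]
# Each group spans data_parallel_size * context_parallel_size = 2 * 2 = 4
# ranks: the combined DP x CP group which, as descriptions.py notes, is the
# set of ranks over which ZeRO-style optimizer state can be sharded.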

def generate_tensor_model_parallel_groups(world_size, tensor_model_parallel_size):
    """
    Generate model parallel groups based on tensor model parallel size.
    """
    assert world_size % tensor_model_parallel_size == 0, "world_size must be divisible by tensor_model_parallel_size"
    num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
    tensor_model_parallel_group_ranks = []
    for i in range(num_tensor_model_parallel_groups):
        ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
        tensor_model_parallel_group_ranks.append(list(ranks))
    return tensor_model_parallel_group_ranks

def generate_pipeline_parallel_groups(world_size, pipeline_model_parallel_size):
    """
    Generate pipeline parallel groups based on pipeline model parallel size.
    """
    assert world_size % pipeline_model_parallel_size == 0, "world_size must be divisible by pipeline_model_parallel_size"
    num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
    pipeline_parallel_group_ranks = []

    for i in range(num_pipeline_model_parallel_groups):
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        pipeline_parallel_group_ranks.append(list(ranks))
    return pipeline_parallel_group_ranks
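# Worked example (illustrative; computed by hand) for world_size=16:
# generate_tensor_model_parallel_groups(16, 2) returns the eight adjacent
# pairs [0, 1], [2, 3], ..., [14, 15]: TP groups stay inside a node, where
# NVLink keeps their frequent allreduces cheap.
# generate_pipeline_parallel_groups(16, 2) returns the eight strided pairs
# [0, 8], [1, 9], ..., [7, 15]: PP groups span the two nodes, since stages
# only need point-to-point Send/Receive between them.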

def generate_context_parallel_groups(world_size, context_parallel_size, tensor_model_parallel_size, pipeline_model_parallel_size):
    """
    Generate context parallel groups based on context parallel size, considering tensor and pipeline model parallel sizes.
    """
    assert world_size % (context_parallel_size * tensor_model_parallel_size * pipeline_model_parallel_size) == 0, "world_size must be divisible by the product of context_parallel_size, tensor_model_parallel_size, and pipeline_model_parallel_size"
    data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size * context_parallel_size)
    context_parallel_group_ranks = []
    num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size

    for i in range(pipeline_model_parallel_size):
        for j in range(data_parallel_size):
            start_rank = (
                i * num_pipeline_model_parallel_groups
                + j * tensor_model_parallel_size * context_parallel_size
            )
            end_rank = (
                i * num_pipeline_model_parallel_groups
                + (j + 1) * tensor_model_parallel_size * context_parallel_size
            )
            for k in range(tensor_model_parallel_size):
                ranks = range(start_rank + k, end_rank, tensor_model_parallel_size)
                context_parallel_group_ranks.append(list(ranks))
    return context_parallel_group_ranks
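# Worked example (illustrative; computed by hand):
# generate_context_parallel_groups(16, 2, 2, 2) computes
# data_parallel_size = 16 // (2 * 2 * 2) = 2 and returns
#   [[0, 2], [1, 3], [4, 6], [5, 7], [8, 10], [9, 11], [12, 14], [13, 15]]
# Each CP group of 2 ranks holds the Q/K/V of complementary token chunks.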

def plot_parallel_groups(title="Parallel Groups", dp_groups=None, tp_groups=None, pp_groups=None, cp_groups=None):
    # Initialize a figure
    fig, ax = plt.subplots(figsize=(8, 6))

    # Define the spacing between blocks and their size
    block_size = 700  # Size of the blocks in the scatter plot
    spacing = 1.5  # Spacing multiplier between blocks
    if cp_groups is None:
        cp_offset_x = 0
        cp_offset_y = 0
        tp_offset_x = 0.2
        tp_offset_y = -0.2
        if tp_groups:
            pp_offset_x = 0.4
            pp_offset_y = -0.4
        else:
            pp_offset_x = 0.2
            pp_offset_y = -0.2
    else:
        cp_offset_x = 0.2
        cp_offset_y = -0.2
        tp_offset_x = 0.4
        tp_offset_y = -0.4
        if tp_groups:
            pp_offset_x = 0.6
            pp_offset_y = -0.6
        else:
            pp_offset_x = 0.4
            pp_offset_y = -0.4

    # Adjust the grid layout to map GPU ranks from top-left to bottom-right
    num_cols = 4  # Number of columns in the grid
    x_positions = np.tile(np.arange(num_cols), num_cols) * spacing
    y_positions = np.repeat(np.arange(num_cols), num_cols)[::-1] * spacing  # Reverse to start from top

    dp_colors = plt.cm.tab20(np.linspace(0, 1, len(dp_groups)))

    # Use tab20b for better color separation between groups
    if tp_groups is not None:
        tp_colors = plt.cm.tab20b(np.linspace(0, 1, len(tp_groups)))

    # If more colors are needed, tab20b and tab20c can be combined
    if pp_groups is not None:
        pp_colors = plt.cm.tab20c(np.linspace(0, 1, len(pp_groups)))

    if cp_groups is not None:
        cp_colors = plt.cm.tab20c(np.linspace(0, 1, len(cp_groups)))

    if cp_groups is not None:
        for group_idx, group in enumerate(cp_groups):
            for rank in group:
                x = x_positions[rank % (num_cols * num_cols)] + cp_offset_x
                y = y_positions[rank % (num_cols * num_cols)] + cp_offset_y
                ax.scatter(x, y, s=block_size, color=cp_colors[group_idx], edgecolor='black', zorder=5, marker='s')
                ax.text(x, y, f'CP{rank}', ha='center', va='center', color='white', fontsize=8, zorder=6, fontweight='bold')

    for group_idx, group in enumerate(dp_groups):
        for rank in group:
            x = x_positions[rank % (num_cols * num_cols)]
            y = y_positions[rank % (num_cols * num_cols)]
            ax.scatter(x, y, s=block_size, color=dp_colors[group_idx], edgecolor='black', zorder=5, marker='>')
            ax.text(x, y, f'DP{rank}', ha='center', va='center', color='white', fontsize=8, zorder=6, fontweight='bold')

    if tp_groups is not None:
        for group_idx, group in enumerate(tp_groups):
            for rank in group:
                x = x_positions[rank % (num_cols * num_cols)] + tp_offset_x
                y = y_positions[rank % (num_cols * num_cols)] + tp_offset_y
                ax.scatter(x, y, s=block_size, color=tp_colors[group_idx], edgecolor='black', zorder=5, marker='p')
                ax.text(x, y, f'TP{rank}', ha='center', va='center', color='white', fontsize=8, zorder=6, fontweight='bold')

    if pp_groups is not None:
        for group_idx, group in enumerate(pp_groups):
            for rank in group:
                x = x_positions[rank % (num_cols * num_cols)] + pp_offset_x
                y = y_positions[rank % (num_cols * num_cols)] + pp_offset_y
                ax.scatter(x, y, s=block_size, color=pp_colors[group_idx], edgecolor='black', zorder=5, marker='h')
                ax.text(x, y, f'PP{rank}', ha='center', va='center', color='white', fontsize=8, zorder=6, fontweight='bold')

    # Draw a separating line between Node0 and Node1
    mid_y_position = np.max(y_positions) / 2
    ax.axhline(y=mid_y_position, color='black', linestyle='-', linewidth=2, zorder=0)

    # Add Node labels
    ax.text(-spacing, max(y_positions) / 4, 'Node1', verticalalignment='center', fontsize=12)
    ax.text(-spacing, 3 * max(y_positions) / 4, 'Node0', verticalalignment='center', fontsize=12)

    # Adjusting the appearance
    ax.set_aspect('equal')  # Keep the aspect ratio square
    ax.axis('off')  # Turn off the axis
    plt.title(title, pad=30)

    return fig

# Gradio interface setup
def create_interface():
    def update_plot(parallel_group_type, tensor_model_parallel_size, pipeline_model_parallel_size, context_parallel_size, unused_text):
        world_size = 16  # Fixed world size for 2 machines with 8 GPUs each

        description = descriptions.get(parallel_group_type, "Invalid parallel group type")

        # Initialize groups to None
        data_groups = tp_groups = pp_groups = cp_groups = None

        if "CP" in parallel_group_type or parallel_group_type == 'Context Parallel':
            cp_groups = generate_context_parallel_groups(world_size, context_parallel_size, tensor_model_parallel_size, pipeline_model_parallel_size)
            if "DP" in parallel_group_type:
                data_groups = generate_context_data_parallel_groups(world_size, tensor_model_parallel_size, pipeline_model_parallel_size, context_parallel_size)
        else:
            if "DP" in parallel_group_type or parallel_group_type == 'Data Parallel':
                data_groups = generate_data_parallel_groups(world_size, tensor_model_parallel_size, pipeline_model_parallel_size, context_parallel_size)

        if parallel_group_type in ['Tensor Model Parallel', 'DP+TP', 'DP+TP+PP', 'CP+DP+TP', 'CP+DP+TP+PP']:
            tp_groups = generate_tensor_model_parallel_groups(world_size, tensor_model_parallel_size)
        if parallel_group_type in ['Pipeline Parallel', 'DP+PP', 'DP+TP+PP', 'CP+DP+PP', 'CP+DP+TP+PP']:
            pp_groups = generate_pipeline_parallel_groups(world_size, pipeline_model_parallel_size)

        # Prepare text description for display
        groups_list_str = ""
        if data_groups:
            groups_list_str += "Data Parallel Groups:\n"
            groups_list_str += "\n".join([f"Data Group {idx + 1}: {group}" for idx, group in enumerate(data_groups)])
            groups_list_str += "\n--------------------------------------\n"
        if tp_groups:
            groups_list_str += "Tensor Model Parallel Groups:\n"
            groups_list_str += "\n".join([f"Tensor Group {idx + 1}: {group}" for idx, group in enumerate(tp_groups)])
            groups_list_str += "\n--------------------------------------\n"
        if pp_groups:
            groups_list_str += "Pipeline Model Parallel Groups:\n"
            groups_list_str += "\n".join([f"Pipeline Group {idx + 1}: {group}" for idx, group in enumerate(pp_groups)])
            groups_list_str += "\n--------------------------------------\n"
        if cp_groups:
            groups_list_str += "Context Parallel Groups:\n"
            groups_list_str += "\n".join([f"Context Group {idx + 1}: {group}" for idx, group in enumerate(cp_groups)])
            groups_list_str += "\n--------------------------------------\n"

        text_to_display = f"==========Parallel Groups Display==========\n\n{groups_list_str}\n\n{description}"

        # Generate the figure with the parallel groups
        fig = plot_parallel_groups(f"{parallel_group_type} Groups", data_groups if data_groups else [], tp_groups=tp_groups, pp_groups=pp_groups, cp_groups=cp_groups)

        return fig, text_to_display

    iface = gr.Interface(
        fn=update_plot,
        inputs=[
            gr.Dropdown(['Data Parallel', 'Tensor Model Parallel', 'Pipeline Parallel', 'Context Parallel',
                         'DP+TP', 'DP+PP', 'DP+TP+PP',
                         'CP+DP', 'CP+DP+TP', 'CP+DP+PP', 'CP+DP+TP+PP'], label="Parallel Group Type"),
            gr.Slider(1, 8, step=1, label="Tensor Model Parallel Size"),
            gr.Slider(1, 8, step=1, label="Pipeline Model Parallel Size"),
            gr.Slider(1, 8, step=1, label="Context Parallel Size"),
            gr.Textbox(basic_texts, interactive=False)
        ],
        outputs=[
            "plot",
            "text"
        ],
        title="Megatron-LM Parallel Group Visualization",
        description="Select parallel sizes and types to visualize different parallel groups with distinct colors. This includes combinations of Data Parallel (DP), Tensor Model Parallel (TP), Pipeline Parallel (PP), and Context Parallel (CP). Note that the data parallel size is computed automatically from world_size (fixed at 16 here) together with tensor_model_parallel_size, pipeline_model_parallel_size, and context_parallel_size.",
        live=True
    )

    return iface

# Create and launch the interface; the __main__ guard keeps the module
# importable (e.g. by the sanity check below) without starting the server.
if __name__ == "__main__":
    iface = create_interface()
    iface.launch(share=True)
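A quick sanity check one could run alongside this Space (a minimal sketch, assuming app.py guards its launch call as above so the import has no side effects; check_groups.py is a hypothetical helper, not part of this commit): every group type should partition the 16 ranks, with each rank appearing in exactly one group of its type.

# check_groups.py (hypothetical helper)
from app import (
    generate_data_parallel_groups,
    generate_tensor_model_parallel_groups,
    generate_pipeline_parallel_groups,
    generate_context_parallel_groups,
)

world_size, tp, pp, cp = 16, 2, 2, 2
for name, groups in [
    ("DP", generate_data_parallel_groups(world_size, tp, pp, cp)),
    ("TP", generate_tensor_model_parallel_groups(world_size, tp)),
    ("PP", generate_pipeline_parallel_groups(world_size, pp)),
    ("CP", generate_context_parallel_groups(world_size, cp, tp, pp)),
]:
    # Flatten the groups and confirm they cover each rank exactly once.
    flat = sorted(rank for group in groups for rank in group)
    assert flat == list(range(world_size)), f"{name} groups do not partition the ranks"
print("all group types partition the 16 ranks")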
descriptions.py
ADDED
@@ -0,0 +1,24 @@
basic_texts = "For 1D parallelism, GPUs in the same parallel group share the same color. More concretely: " \
    "for data parallelism, ranks with the same color hold identical copies of the model weights; " \
    "for tensor model parallelism, ranks with the same color each hold a shard of the model weights, and an allreduce is needed to assemble the full weights; " \
    "for pipeline parallelism, ranks with the same color hold layers that run serially and must communicate to pass data along, " \
    "and reading the process groups row by row, each row corresponds to one pipeline stage, which is what lets different micro-batches be processed in a pipelined fashion; " \
    "for context parallelism, ranks with the same color each hold the Q, K, V of a subset of tokens, and the full result is computed by circulating K and V across the GPUs in the group, with point-to-point Send/Receive communication similar to pipeline parallelism.\n"
basic_texts += "For 2D parallelism, two color spaces are used to distinguish the two types of parallel groups.\n" \
    "For 3D parallelism, three color spaces are used to distinguish the three types of parallel groups.\n" \
    "Context Parallel is a special form of parallelism, best understood as an upgrade of Sequence Parallel; it currently applies only to the Self Attention layer in FP8 mode, and compared with Sequence Parallel it can further shard all activations in the Transformer to save memory. " \
    "Context Parallel can also be combined with DP/TP/PP as well as SP; its official implementation lives in https://github.com/NVIDIA/TransformerEngine .\n"

descriptions = {
    'Data Parallel': 'For data parallelism without ZeRO, the weight gradients of all ranks in a data parallel group must be allreduced during the backward pass. Recommended papers and articles on how data parallelism works:\n https://www.cs.cmu.edu/~muli/file/ps.pdf \n https://zhuanlan.zhihu.com/p/485208899 \n https://zhuanlan.zhihu.com/p/617133971 \n https://zhuanlan.zhihu.com/p/618865052',
    'Tensor Model Parallel': 'In tensor model parallelism, different parts of the model (e.g. different intermediate or weight tensors) are sharded across GPUs, and allreduces are needed in both the forward and backward passes to synchronize activations and parameter gradients. Tensor model parallelism is one of the core parallelism schemes of Megatron-LM. Recommended papers and articles on how tensor model parallelism works:\n https://arxiv.org/pdf/1909.08053.pdf \n https://strint.notion.site/Megatron-LM-86381cfe51184b9c888be10ee82f3812\n https://zhuanlan.zhihu.com/p/622212228 ',
    'Pipeline Parallel': 'In pipeline parallelism, different layers of the model are placed on different GPUs and the input is split into micro-batches, so that multiple devices process these batches in parallel and speed up training. Within a pipeline parallel group, the layers held by its GPUs run serially and exchange data via point-to-point Send/Receive; in the text box, each column of the listed groups forms one complete pipeline stage, and it is this arrangement that lets different micro-batches flow through the pipeline. Recommended papers and articles on how pipeline parallelism works:\n https://arxiv.org/pdf/2104.04473.pdf \n https://zhuanlan.zhihu.com/p/678724323 \n https://zhuanlan.zhihu.com/p/613196255 \n https://juejin.cn/post/7063030243224879140 \n https://mp.weixin.qq.com/s/PXjYm9dN8C9B8svMQ7nOvw',
    'Context Parallel': 'In long-sequence training, as the sequence length grows the number of tokens per micro-batch increases, and activations grow roughly in proportion to the sequence length. The best remedy is Context Parallel (CP), which shards along the sequence dimension and completes the self-attention computation via ring communication. However, once the product of the CP and Tensor Parallel (TP) sizes exceeds 8, the NVLink advantage may be lost and the ring communication becomes inefficient. Even so, CP has the advantage of only affecting the size of the data parallel (DP) group, allowing all ranks in the combined CP and DP groups to be used for distributed sharding, which is especially beneficial for ZeRO-style optimizers. Currently in Megatron-LM, CP is only usable with the model implementations under core and with FP8 training; Megatron-LM merely provides the Context Parallel group, while the attention computation and communication within a CP group are implemented in Transformer Engine. Recommended papers and articles on how Context Parallel works:\n https://www.zhihu.com/question/637961859/answer/3349322621 \n https://mp.weixin.qq.com/s/u4gG1WZ73mgH9mEKQQCRww',
    'DP+TP': 'For one batch of data, TP must allreduce the forward and backward activations of the Transformer layers within every TP group, while DP only needs a single allreduce over all parameters during the backward pass, so data parallel communication is usually far lighter than model parallel communication. When combining DP+TP, therefore, raise the data parallel degree as much as possible while keeping the model from running OOM. Recommended reading on hybrid parallelism in practice:\n https://mp.weixin.qq.com/s/D-14J482SFQf-zh-EFa-1w',
    'DP+PP': 'Prefer the combination of data parallelism and pipeline parallelism over data parallelism and tensor parallelism, especially when inter-node bandwidth is low and the model is very large. DP+PP generally incurs less communication than DP+TP, although PP suffers from pipeline bubbles; recent infra work such as ZB-H1/H2 can drive the bubble fraction very low, making pipeline parallelism a very effective way to scale up models. Pipeline parallelism is also often combined with gradient checkpointing to scale further, which also eases load-balancing issues across pipeline stages. Recommended reading on hybrid parallelism in practice:\n https://mp.weixin.qq.com/s/D-14J482SFQf-zh-EFa-1w \n https://mp.weixin.qq.com/s/PXjYm9dN8C9B8svMQ7nOvw \n https://zhuanlan.zhihu.com/p/678724323 ',
    'DP+TP+PP': 'For model scales that PP or TP alone cannot handle (TP is usually kept within a node, i.e. no more than 8), such as pretraining LLMs of 34B parameters or more, DP+TP+PP hybrid parallelism is required. Recommended reading on hybrid parallelism in practice:\n https://juejin.cn/post/7063030243224879140 \n https://zhuanlan.zhihu.com/p/629121480 \n https://zhuanlan.zhihu.com/p/670374745',
    'CP+DP': 'See the Context Parallel entry; recommended papers and articles on how Context Parallel works:\n https://www.zhihu.com/question/637961859/answer/3349322621 \n https://mp.weixin.qq.com/s/u4gG1WZ73mgH9mEKQQCRww',
    'CP+DP+TP': 'See the Context Parallel entry; recommended papers and articles on how Context Parallel works:\n https://www.zhihu.com/question/637961859/answer/3349322621 \n https://mp.weixin.qq.com/s/u4gG1WZ73mgH9mEKQQCRww',
    'CP+DP+PP': 'See the Context Parallel entry; recommended papers and articles on how Context Parallel works:\n https://www.zhihu.com/question/637961859/answer/3349322621 \n https://mp.weixin.qq.com/s/u4gG1WZ73mgH9mEKQQCRww',
    'CP+DP+TP+PP': 'See the Context Parallel entry; recommended papers and articles on how Context Parallel works:\n https://www.zhihu.com/question/637961859/answer/3349322621 \n https://mp.weixin.qq.com/s/u4gG1WZ73mgH9mEKQQCRww',
}