Delete zero_to_fp32.py
zero_to_fp32.py (+0 -604)
DELETED
@@ -1,604 +0,0 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

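# A fuller invocation, using the optional flags defined by the argparse
# section at the bottom of this script:
#
#   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin \
#       --tag global_step1 --exclude_frozen_parameters --debug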
import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]

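# Example (comment added for clarity): natural_keys makes numbered checkpoint
# shards sort numerically rather than lexically, e.g.
#   sorted(["rank_10", "rank_2", "rank_1"], key=natural_keys)
#   -> ["rank_1", "rank_2", "rank_10"]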

def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

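    # Worked example (comment added; numbers are hypothetical): with expert
    # parallelism, PARTITION_COUNT might be e.g. [4, 2] for the non-expert and
    # expert param groups; max([4, 2]) == 4 then recovers the dp world size.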
    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

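    # Note (comment added for clarity): from here on, for stage 3
    # fp32_flat_groups holds one concatenated 1-D tensor per rank, while for
    # stage <= 2 it holds, per rank, a list with one flat partition per param
    # group.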
    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _has_callable(obj, fn):
    attr = getattr(obj, fn, None)
    return callable(attr)


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

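    # In outline (comment added for clarity; the original left this "XXX"
    # undocumented): each param group's 1-D fp32 partitions are concatenated
    # across ranks into one flat vector, and every parameter is then sliced
    # back out of that vector sequentially by its recorded shape.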
    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

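        # Worked example (comment added): with world_size=2, align_to is 4,
        # so zero2_align(5) == 4 * math.ceil(5 / 4) == 8.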
        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

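    # Hypothetical example (comment added): a pair like
    # ["lm_head.weight", "model.embed.weight"] re-ties a weight-tied output
    # head to its embedding after reconstruction.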
    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

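# Worked example (comment added): unpartitioned_numel=10, world_size=4 gives
# partitioned_numel=3 and padding_numel=2: each of the 4 ranks stores 3
# elements (12 slots in total), the last 2 of which are padding.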

def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

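    # Illustration (comment added for clarity): every rank holds one
    # equally-sized 1-D shard and a given param starts at the same offset in
    # each shard, so concatenating the world_size narrow() slices and trimming
    # the tail padding recovers the param's full flat tensor.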
    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)

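# Typical programmatic use (sketch added for convenience, mirroring the CLI
# entry point at the bottom of this file):
#
#   convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
#                                              "path/checkpoint-12/pytorch_model.bin")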

def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_file,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)