keehun committed
Commit 454c0d2
1 parent: f238c48

End of training

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. README.md +17 -0
  2. checkpoint-1000/latest +1 -0
  3. checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  4. checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  5. checkpoint-1000/random_states_0.pkl +3 -0
  6. checkpoint-1000/scheduler.bin +3 -0
  7. checkpoint-1000/zero_to_fp32.py +578 -0
  8. checkpoint-1500/latest +1 -0
  9. checkpoint-1500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  10. checkpoint-1500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  11. checkpoint-1500/random_states_0.pkl +3 -0
  12. checkpoint-1500/scheduler.bin +3 -0
  13. checkpoint-1500/zero_to_fp32.py +578 -0
  14. checkpoint-2000/latest +1 -0
  15. checkpoint-2000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  16. checkpoint-2000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  17. checkpoint-2000/random_states_0.pkl +3 -0
  18. checkpoint-2000/scheduler.bin +3 -0
  19. checkpoint-2000/zero_to_fp32.py +578 -0
  20. checkpoint-2500/latest +1 -0
  21. checkpoint-2500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  22. checkpoint-2500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  23. checkpoint-2500/random_states_0.pkl +3 -0
  24. checkpoint-2500/scheduler.bin +3 -0
  25. checkpoint-2500/zero_to_fp32.py +578 -0
  26. checkpoint-3000/latest +1 -0
  27. checkpoint-3000/pytorch_model/mp_rank_00_model_states.pt +3 -0
  28. checkpoint-3000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  29. checkpoint-3000/random_states_0.pkl +3 -0
  30. checkpoint-3000/scheduler.bin +3 -0
  31. checkpoint-3000/zero_to_fp32.py +578 -0
  32. checkpoint-500/latest +1 -0
  33. checkpoint-500/pytorch_model/mp_rank_00_model_states.pt +3 -0
  34. checkpoint-500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  35. checkpoint-500/random_states_0.pkl +3 -0
  36. checkpoint-500/scheduler.bin +3 -0
  37. checkpoint-500/zero_to_fp32.py +578 -0
  38. feature_extractor/preprocessor_config.json +28 -0
  39. learned_embeds-steps-3000.bin +3 -0
  40. learned_embeds.bin +3 -0
  41. logs/textual_inversion/1689321680.473668/events.out.tfevents.1689321680.gpu-1.2126126.1 +3 -0
  42. logs/textual_inversion/1689321680.4759483/hparams.yml +46 -0
  43. logs/textual_inversion/events.out.tfevents.1689321680.gpu-1.2126126.0 +3 -0
  44. model_index.json +34 -0
  45. safety_checker/config.json +168 -0
  46. safety_checker/pytorch_model.bin +3 -0
  47. scheduler/scheduler_config.json +15 -0
  48. text_encoder/config.json +25 -0
  49. text_encoder/pytorch_model.bin +3 -0
  50. tokenizer/added_tokens.json +3 -0
README.md ADDED
@@ -0,0 +1,17 @@

---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- textual_inversion
inference: true
---

# Textual inversion text2image fine-tuning - keehun/textual_inversion_slime_up
These are textual inversion adaptation weights for runwayml/stable-diffusion-v1-5. You can find some example images below.
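Usage note (not part of this commit): a minimal sketch of how textual inversion weights like these are typically loaded with diffusers. The placeholder token "<slime_up>" is an assumption inferred from the repository name; the actual token is recorded in the training hparams, not in this README.

import torch
from diffusers import StableDiffusionPipeline

# Load the base model the embedding was trained against (per the README above).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# learned_embeds.bin is added at the top level of this commit; the token name below is assumed.
pipe.load_textual_inversion(
    "keehun/textual_inversion_slime_up",
    weight_name="learned_embeds.bin",
    token="<slime_up>",  # assumed placeholder token
)

image = pipe("a photo of <slime_up>", num_inference_steps=30).images[0]
image.save("example.png")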
checkpoint-1000/latest ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:096db2b1eeaecd7326681ea66bbb599b14ec484e8393cf331e39614602c64f13
size 832859281
checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa0ad71159778a0cdca3caa916037bf14d19bd2632f7bf223b87f75bd8589fb8
size 455356569
checkpoint-1000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ff347a34d87cb845f8f6e748b938f9a1f0a9c0053ca964123f3b7d0d309ae041
size 17747
checkpoint-1000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbb19b948b33a080098712534e53a991114e50367c7efbe3f9b4d0cec03eb540
size 563
checkpoint-1000/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage == 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dicts.append(torch.load(f, map_location=device))

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage == 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage == 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage == 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model`: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
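As a usage sketch (not part of the commit), the script above could be pointed at one of the checkpoint folders in this repository to consolidate the ZeRO shards into a single fp32 state dict; the output filename below is illustrative.

# Hedged sketch: run from the repository root with checkpoint-1000/zero_to_fp32.py importable.
# checkpoint-1000/ holds the 'latest' tag file ("pytorch_model") and the pytorch_model/ shard folder listed above.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1000",                         # folder containing 'latest' and the tag folder
    "checkpoint-1000/pytorch_model_fp32.bin",  # assumed output path
)

Equivalently, the script's own CLI form would be: python checkpoint-1000/zero_to_fp32.py checkpoint-1000 checkpoint-1000/pytorch_model_fp32.bin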
checkpoint-1500/latest ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-1500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67a2a56df29cd953a0bb4519e62143e367308e2f7975d186a9b5ebff5356c116
size 832859281
checkpoint-1500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebe9c0ba7a75046ade2a61723ab5000738a655d2aa1dcf1f8e98504987781c8b
size 455356569
checkpoint-1500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:168bbc5e825e6f3c8de8b1e2a0a6abb9a11db9fbb38aa3e9b4c1c94ec2212d2e
size 17747
checkpoint-1500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7abf0b3af2013b920618166f35e17d899fce776df72f2c2a64c13989f4b8a6a8
size 563
checkpoint-1500/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
[file identical to checkpoint-1000/zero_to_fp32.py above; duplicate listing omitted]
checkpoint-2000/latest ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-2000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ceb3f896c21fa86910f56a9e280bd7ad1506fad46023dc0db740c7fbe3003dc9
size 832859281
checkpoint-2000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85fcfd48e729eba7f1ee246b51eb877dc69331ca2ab6713d8f8ba9ad4c4846a5
size 455356569
checkpoint-2000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7675869688235cb19be24b3f9763180996da46f8f41e1ad86148bfb090b469b5
size 17747
checkpoint-2000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ce55ada40659c32ea01f44d7732f0406a6c9004376f7e5579de396b3481cc2d
size 563
checkpoint-2000/zero_to_fp32.py ADDED
checkpoint-2500/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-2500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6f9516ba60ba9732691676243bab1d66010bd947fdc1b4e21ee5857b48b2114
+ size 832859281
checkpoint-2500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaf0bcd2c529305953edaf7d42802b49f6d3b6652eb62386c54c5e44338aea34
+ size 455356569
checkpoint-2500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9210d1debdd0ff09eb9f99242e7f2df4f0c5ba776dfd64ab4ac3d52bacb34542
+ size 17747
checkpoint-2500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:496e5a35d8686f4bf9ce59c376f5f501ec4a1034cf3b70deb1f3d2c047fab2d4
+ size 563
checkpoint-2500/zero_to_fp32.py ADDED
checkpoint-3000/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-3000/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e12966f3b7eb463e7ed7c7d249f562ca7fc7219a9e35f681341f275027b7582
+ size 832859281
checkpoint-3000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e8da04e2158706e2d26e39567e8b5f3b73e7a5fe9677f7409818091c395747
+ size 455356569
checkpoint-3000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb1a4c89fedcf9bd85d6814d12624d8fc1b47d4ebda173a8280f0ccfeff96f22
+ size 17747
checkpoint-3000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2056b541ceef5d77999a48951b85854f6503b83d58bc070e970ec3531e56e169
+ size 563
checkpoint-3000/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
checkpoint-500/latest ADDED
@@ -0,0 +1 @@
+ pytorch_model
checkpoint-500/pytorch_model/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4941c4de961397308bbc114edbc1e2c2f6406160f91ecb7843e9bb3f8a425bd
+ size 832859281
checkpoint-500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75e02e697570f4eb65b136c04e35abf6055e08564626f6cdefba77cf0b7cdf2b
+ size 455356569
checkpoint-500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e679a594f8fe773bd23dd820c941bdf068eb250f9b6ceae1d55180c9b649d666
+ size 17683
checkpoint-500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:478ff7b9adcb1fba8f74abd29e0dfb41133888ecf608d07ea8ae2143e019d098
+ size 563
checkpoint-500/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "crop_size": {
+ "height": 224,
+ "width": 224
+ },
+ "do_center_crop": true,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "feature_extractor_type": "CLIPFeatureExtractor",
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "CLIPImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 224
+ }
+ }
learned_embeds-steps-3000.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9099a69a1d91315b711f3df24d3049870bb1f6b12e0d53f698df20cd43564386
+ size 4001
learned_embeds.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6df7dad24c7414a72d9f37ab74a61045a7dde209a2d539094d9f18c5e71d75a2
+ size 3840
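The learned_embeds.bin above is the standalone textual inversion artifact; at 3,840 bytes it is consistent with a single 768-dimensional fp32 vector plus serialization overhead. A minimal inspection sketch, assuming the usual diffusers layout of a `{placeholder_token: embedding}` dict (the file name and `<simple-design>` token come from this repo; everything else is illustrative):

```python
import torch

# Assumption: the diffusers textual inversion format, i.e. a dict that maps
# the placeholder token to its learned embedding tensor.
learned = torch.load("learned_embeds.bin", map_location="cpu")
print(list(learned.keys()))              # expected: ['<simple-design>']
print(learned["<simple-design>"].shape)  # expected: torch.Size([768]) for SD 1.5
```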
logs/textual_inversion/1689321680.473668/events.out.tfevents.1689321680.gpu-1.2126126.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4050ea99a42371699cd1e69c0932966c863b053c09b67360c02861bcb57b8725
+ size 2279
logs/textual_inversion/1689321680.4759483/hparams.yml ADDED
@@ -0,0 +1,46 @@
+ adam_beta1: 0.9
+ adam_beta2: 0.999
+ adam_epsilon: 1.0e-08
+ adam_weight_decay: 0.01
+ allow_tf32: false
+ center_crop: false
+ checkpointing_steps: 500
+ checkpoints_total_limit: null
+ dataloader_num_workers: 0
+ enable_xformers_memory_efficient_attention: false
+ gradient_accumulation_steps: 4
+ gradient_checkpointing: false
+ hub_model_id: null
+ hub_token: null
+ initializer_token: style
+ learnable_property: style
+ learning_rate: 0.002
+ local_rank: 0
+ logging_dir: logs
+ lr_num_cycles: 1
+ lr_scheduler: constant
+ lr_warmup_steps: 0
+ max_train_steps: 3000
+ mixed_precision: 'no'
+ num_train_epochs: 14
+ num_validation_images: 4
+ num_vectors: 1
+ output_dir: textual_inversion_slime_up
+ placeholder_token: <simple-design>
+ pretrained_model_name_or_path: runwayml/stable-diffusion-v1-5
+ push_to_hub: true
+ repeats: 100
+ report_to: tensorboard
+ resolution: 512
+ resume_from_checkpoint: null
+ revision: null
+ save_as_full_pipeline: false
+ save_steps: 3000
+ scale_lr: true
+ seed: null
+ tokenizer_name: null
+ train_batch_size: 1
+ train_data_dir: ./slime-high
+ validation_epochs: null
+ validation_prompt: null
+ validation_steps: 100
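Note that `scale_lr: true` means the `learning_rate` recorded above is the base value before scaling. A small sketch of the scaling rule the diffusers textual inversion example typically applies, assuming a single training process (the authoritative rule lives in the training script, not in this commit):

```python
# Effective learning rate when scale_lr is enabled (assumed single GPU).
learning_rate = 0.002
gradient_accumulation_steps = 4
train_batch_size = 1
num_processes = 1  # assumption: one training process

effective_lr = learning_rate * gradient_accumulation_steps * train_batch_size * num_processes
print(effective_lr)  # 0.008
```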
logs/textual_inversion/events.out.tfevents.1689321680.gpu-1.2126126.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be31c4543a90011878873ef50682153d16789ecc118ae206dd25c8eea17a2ca0
+ size 252674
model_index.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "_class_name": "StableDiffusionPipeline",
+ "_diffusers_version": "0.19.0.dev0",
+ "_name_or_path": "runwayml/stable-diffusion-v1-5",
+ "feature_extractor": [
+ "transformers",
+ "CLIPImageProcessor"
+ ],
+ "requires_safety_checker": true,
+ "safety_checker": [
+ "stable_diffusion",
+ "StableDiffusionSafetyChecker"
+ ],
+ "scheduler": [
+ "diffusers",
+ "PNDMScheduler"
+ ],
+ "text_encoder": [
+ "transformers",
+ "CLIPTextModel"
+ ],
+ "tokenizer": [
+ "transformers",
+ "CLIPTokenizer"
+ ],
+ "unet": [
+ "diffusers",
+ "UNet2DConditionModel"
+ ],
+ "vae": [
+ "diffusers",
+ "AutoencoderKL"
+ ]
+ }
safety_checker/config.json ADDED
@@ -0,0 +1,168 @@
+ {
+ "_commit_hash": "c9ab35ff5f2c362e9e22fbafe278077e196057f0",
+ "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/c9ab35ff5f2c362e9e22fbafe278077e196057f0/safety_checker",
+ "architectures": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "initializer_factor": 1.0,
+ "logit_scale_init_value": 2.6592,
+ "model_type": "clip",
+ "projection_dim": 768,
+ "text_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 0,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 77,
+ "min_length": 0,
+ "model_type": "clip_text_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 12,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 1,
+ "prefix": null,
+ "problem_type": null,
+ "projection_dim": 512,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.30.2",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "vocab_size": 49408
+ },
+ "torch_dtype": "float32",
+ "transformers_version": null,
+ "vision_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "image_size": 224,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "clip_vision_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "patch_size": 14,
+ "prefix": null,
+ "problem_type": null,
+ "projection_dim": 512,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.30.2",
+ "typical_p": 1.0,
+ "use_bfloat16": false
+ }
+ }
safety_checker/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
+ size 1216064769
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "_class_name": "PNDMScheduler",
+ "_diffusers_version": "0.19.0.dev0",
+ "beta_end": 0.012,
+ "beta_schedule": "scaled_linear",
+ "beta_start": 0.00085,
+ "clip_sample": false,
+ "num_train_timesteps": 1000,
+ "prediction_type": "epsilon",
+ "set_alpha_to_one": false,
+ "skip_prk_steps": true,
+ "steps_offset": 1,
+ "timestep_spacing": "leading",
+ "trained_betas": null
+ }
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "runwayml/stable-diffusion-v1-5",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "projection_dim": 768,
+ "torch_dtype": "float32",
+ "transformers_version": "4.30.2",
+ "vocab_size": 49409
+ }
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b8a840bf2896c92287fe88e573e06bd93f92e2281da2366fd9453d0d84859f3
+ size 492312865
tokenizer/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+ "<simple-design>": 49408
+ }
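Taken together, the files above form a loadable Stable Diffusion pipeline plus the learned token `<simple-design>` (token id 49408). A hedged usage sketch with diffusers, assuming the files are checked out locally; the base model id comes from `model_index.json`, while the prompt and output path are purely illustrative:

```python
import torch
from diffusers import StableDiffusionPipeline

# Base model recorded in model_index.json.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load this repo's embedding; "." assumes the repo was cloned into the
# current directory. This registers the <simple-design> token.
pipe.load_textual_inversion(".", weight_name="learned_embeds.bin")

image = pipe("a poster in <simple-design> style").images[0]
image.save("simple_design_sample.png")
```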