End of training
- README.md +17 -0
- checkpoint-1000/latest +1 -0
- checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-1000/random_states_0.pkl +3 -0
- checkpoint-1000/scheduler.bin +3 -0
- checkpoint-1000/zero_to_fp32.py +578 -0
- checkpoint-1500/latest +1 -0
- checkpoint-1500/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-1500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-1500/random_states_0.pkl +3 -0
- checkpoint-1500/scheduler.bin +3 -0
- checkpoint-1500/zero_to_fp32.py +578 -0
- checkpoint-2000/latest +1 -0
- checkpoint-2000/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-2000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-2000/random_states_0.pkl +3 -0
- checkpoint-2000/scheduler.bin +3 -0
- checkpoint-2000/zero_to_fp32.py +578 -0
- checkpoint-2500/latest +1 -0
- checkpoint-2500/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-2500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-2500/random_states_0.pkl +3 -0
- checkpoint-2500/scheduler.bin +3 -0
- checkpoint-2500/zero_to_fp32.py +578 -0
- checkpoint-3000/latest +1 -0
- checkpoint-3000/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-3000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-3000/random_states_0.pkl +3 -0
- checkpoint-3000/scheduler.bin +3 -0
- checkpoint-3000/zero_to_fp32.py +578 -0
- checkpoint-500/latest +1 -0
- checkpoint-500/pytorch_model/mp_rank_00_model_states.pt +3 -0
- checkpoint-500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-500/random_states_0.pkl +3 -0
- checkpoint-500/scheduler.bin +3 -0
- checkpoint-500/zero_to_fp32.py +578 -0
- feature_extractor/preprocessor_config.json +28 -0
- learned_embeds-steps-1000.bin +3 -0
- learned_embeds-steps-1500.bin +3 -0
- learned_embeds-steps-2000.bin +3 -0
- learned_embeds-steps-2500.bin +3 -0
- learned_embeds-steps-3000.bin +3 -0
- learned_embeds-steps-500.bin +3 -0
- learned_embeds.bin +3 -0
- logs/textual_inversion/1695306969.1297772/events.out.tfevents.1695306969.90fb41ce5dc1.2595613.1 +3 -0
- logs/textual_inversion/1695306969.1304317/hparams.yml +46 -0
- logs/textual_inversion/events.out.tfevents.1695306969.90fb41ce5dc1.2595613.0 +3 -0
- model_index.json +33 -0
- safety_checker/config.json +168 -0
README.md
ADDED
@@ -0,0 +1,17 @@
---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- textual_inversion
inference: true
---

# Textual inversion text2image fine-tuning - Govern/textual_inversion_airplane
These are textual inversion adaptation weights for runwayml/stable-diffusion-v1-5. You can find some example images below.
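As a usage note, a minimal sketch of loading the final embedding from this repo with diffusers follows. The placeholder token "<airplane>" is an assumption: the token chosen at training time is stored inside learned_embeds.bin and is not visible in this diff.

# Minimal usage sketch (not part of this commit).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the trained embedding from this repo; token is hypothetical --
# omit the argument to fall back to the token saved in the file.
pipe.load_textual_inversion(
    "Govern/textual_inversion_airplane",
    weight_name="learned_embeds.bin",
    token="<airplane>",
)

image = pipe("a photo of a <airplane> in a clear blue sky").images[0]
image.save("airplane.png")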
checkpoint-1000/latest
ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-1000/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c38f25e1373a28fbbb214dac671492249149d6eecfa44e057584e876457f664a
size 832858243
checkpoint-1000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:044fba04ea7581f2c10cef8f3cce1c01b802079a5172bb54aa45a831a4339428
size 455356505
checkpoint-1000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3beb3f628556d43526347255f5ff3bdd83419ed5a9a003df74706085f43524f6
size 15691
checkpoint-1000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbb19b948b33a080098712534e53a991114e50367c7efbe3f9b4d0cec03eb540
size 563
checkpoint-1000/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)


@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dicts.append(torch.load(f, map_location=device))

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
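For reference, a minimal sketch of converting the step-1000 DeepSpeed checkpoint above into a standalone fp32 state_dict, using the script's own public entry point (the output filename is an assumption; the script must be importable, e.g. run from inside checkpoint-1000):

# Sketch: offline fp32 conversion of this repo's step-1000 checkpoint.
# Equivalent CLI: python checkpoint-1000/zero_to_fp32.py checkpoint-1000 pytorch_model_fp32.bin
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1000",         # reads the tag from checkpoint-1000/latest ("pytorch_model")
    "pytorch_model_fp32.bin",  # assumed output filename
)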
checkpoint-1500/latest
ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-1500/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:adac4479ac54c7c58b83a1a434bcd9ab78641ea3f9f7e6d7dcbd89bb9346054f
size 832858243
checkpoint-1500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a00be48b8c301bfe4f1c9ff63ebb37e521ac100b42601862048321c2ccfef40
size 455356505
checkpoint-1500/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f32239cb7d99a4b3aa46a0e9147b55c72081fcf5ba38b7ee4d6aa37b3427e57
size 15691
checkpoint-1500/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7abf0b3af2013b920618166f35e17d899fce776df72f2c2a64c13989f4b8a6a8
size 563
checkpoint-1500/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
(identical copy of checkpoint-1000/zero_to_fp32.py above)
|
532 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
|
533 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
|
534 |
+
|
535 |
+
Returns:
|
536 |
+
- ``model`: modified model
|
537 |
+
|
538 |
+
Make sure you have plenty of CPU memory available before you call this function. If you don't
|
539 |
+
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
|
540 |
+
conveniently placed for you in the checkpoint folder.
|
541 |
+
|
542 |
+
A typical usage might be ::
|
543 |
+
|
544 |
+
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
|
545 |
+
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
|
546 |
+
# submit to model hub or save the model to share with others
|
547 |
+
|
548 |
+
Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
|
549 |
+
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
|
550 |
+
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
|
551 |
+
|
552 |
+
"""
|
553 |
+
logger.info(f"Extracting fp32 weights")
|
554 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
|
555 |
+
|
556 |
+
logger.info(f"Overwriting model with fp32 weights")
|
557 |
+
model = model.cpu()
|
558 |
+
model.load_state_dict(state_dict, strict=False)
|
559 |
+
|
560 |
+
return model
|
561 |
+
|
562 |
+
|
563 |
+
if __name__ == "__main__":
|
564 |
+
|
565 |
+
parser = argparse.ArgumentParser()
|
566 |
+
parser.add_argument("checkpoint_dir",
|
567 |
+
type=str,
|
568 |
+
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
|
569 |
+
parser.add_argument(
|
570 |
+
"output_file",
|
571 |
+
type=str,
|
572 |
+
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
|
573 |
+
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
|
574 |
+
args = parser.parse_args()
|
575 |
+
|
576 |
+
debug = args.debug
|
577 |
+
|
578 |
+
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
|
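
As the script's header comment notes, the usual entry point is the command line, but the same conversion can also be driven from Python. Below is a minimal sketch, assuming deepspeed is installed and the code runs from the root of this repository; the checkpoint path is illustrative:

import sys

sys.path.insert(0, "checkpoint-2000")  # every checkpoint folder in this repo ships a copy of the script
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Resolves the tag from checkpoint-2000/latest ("pytorch_model"), merges the ZeRO
# partitions on CPU, and saves one consolidated fp32 state dict.
convert_zero_checkpoint_to_fp32_state_dict("checkpoint-2000", "pytorch_model.bin")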
checkpoint-2000/latest
ADDED
@@ -0,0 +1 @@
pytorch_model
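
This one-line ``latest`` file is the tag marker that ``get_fp32_state_dict_from_zero_checkpoint`` consults when no explicit ``tag`` argument is given: its content names the sub-folder that holds the actual ZeRO shards, here ``pytorch_model``. A small sketch of that lookup, mirroring the logic in the script above:

import os

checkpoint_dir = "checkpoint-2000"  # illustrative path from this commit
with open(os.path.join(checkpoint_dir, "latest")) as fd:
    tag = fd.read().strip()  # "pytorch_model"
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)  # folder holding the *.pt shards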
checkpoint-2000/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:baeb3fee043ae69cbb068110f7d6537330cf84ff5ca03205faa09e7fbe0136d2
size 832858243
checkpoint-2000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5974a260a19b090b62d737b17a7e1ebc7c1f41ed8400a9cc55c7f301b81626a8
size 455356505
checkpoint-2000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e26a9c08118bc4735c1c9935ce940854d5451cc0eddbff7c0645ba2392787a8
size 15691
checkpoint-2000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ce55ada40659c32ea01f44d7732f0406a6c9004376f7e5579de396b3481cc2d
size 563
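
The ``.pt``, ``.pkl`` and ``.bin`` entries in this diff are Git LFS pointer stubs rather than the binaries themselves: each records the pointer spec version, a sha256 object id, and the payload size in bytes. A hedged sketch of reading one such stub (illustrative only; real workflows should let the ``git lfs`` client fetch the payload):

def parse_lfs_pointer(path):
    # Split each "key value" line of the three-line pointer format into a dict, e.g.
    # {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "563"}
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("checkpoint-2000/scheduler.bin")  # illustrative path
print(int(info["size"]))  # 563 bytes of scheduler state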
checkpoint-2000/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
(identical copy of the 578-line zero_to_fp32.py conversion script saved with every checkpoint above)
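
Two short, self-contained sketches may help with the reconstruction logic this script implements. First, the padding arithmetic: ``zero3_partitioned_param_info`` (copied verbatim from the script) rounds each parameter up to a whole number of per-rank slots, and the merge later trims the padding back off:

import math

def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

# A 10-element parameter on 4 ranks occupies 3 slots per rank (12 in total),
# so the merge concatenates 12 elements and narrows back down to 10.
print(zero3_partitioned_param_info(10, 4))  # -> (3, 2)

Second, a toy version of the zip-and-trim merge performed by ``_zero3_merge_trainable_params``, with two fake ranks and made-up shapes, ending in the same offset sanity check the script uses:

import torch

world_size = 2
shapes = {"w": torch.Size([3]), "b": torch.Size([2])}  # 5 real elements
# Each rank stores ceil(3/2)=2 slots for "w" plus ceil(2/2)=1 slot for "b": 3 slots per rank.
flat_groups = [torch.arange(3, dtype=torch.float32) for _ in range(world_size)]

state_dict, offset = {}, 0
for name, shape in shapes.items():
    partitioned_numel = -(-shape.numel() // world_size)  # ceil division
    # zip the per-rank slices at the param boundary, then trim the padding off
    state_dict[name] = torch.cat(
        tuple(g.narrow(0, offset, partitioned_numel) for g in flat_groups),
        0).narrow(0, 0, shape.numel()).view(shape)
    offset += partitioned_numel

offset *= world_size
assert offset == flat_groups[0].numel() * world_size  # 6 == 6 once padding is counted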
checkpoint-2500/latest
ADDED
@@ -0,0 +1 @@
pytorch_model
checkpoint-2500/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e409ef04c48004ed62b87c47678b58f40d9a3ff0247e54202111dd838857a56
size 832858243
checkpoint-2500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e76aa268939383d38f39c43aa4384e36fb9cbcbefd9992e837fe1c84eeedf287
size 455356505
checkpoint-2500/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1a71abd5dc7026acd71f51115fe1d8209a88641a99d244c471ad41d6725fa00d
size 15691
checkpoint-2500/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:496e5a35d8686f4bf9ce59c376f5f501ec4a1034cf3b70deb1f3d2c047fab2d4
size 563
checkpoint-2500/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
|
3 |
+
# Copyright (c) Microsoft Corporation.
|
4 |
+
# SPDX-License-Identifier: Apache-2.0
|
5 |
+
|
6 |
+
# DeepSpeed Team
|
7 |
+
|
8 |
+
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
|
9 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
10 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
11 |
+
# application.
|
12 |
+
#
|
13 |
+
# example: python zero_to_fp32.py . pytorch_model.bin
|
14 |
+
|
15 |
+
import argparse
|
16 |
+
import torch
|
17 |
+
import glob
|
18 |
+
import math
|
19 |
+
import os
|
20 |
+
import re
|
21 |
+
from collections import OrderedDict
|
22 |
+
from dataclasses import dataclass
|
23 |
+
|
24 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
25 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
26 |
+
from deepspeed.utils import logger
|
27 |
+
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
28 |
+
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
29 |
+
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
30 |
+
|
31 |
+
|
32 |
+
@dataclass
|
33 |
+
class zero_model_state:
|
34 |
+
buffers: dict()
|
35 |
+
param_shapes: dict()
|
36 |
+
shared_params: list
|
37 |
+
ds_version: int
|
38 |
+
frozen_param_shapes: dict()
|
39 |
+
frozen_param_fragments: dict()
|
40 |
+
|
41 |
+
|
42 |
+
debug = 0
|
43 |
+
|
44 |
+
# load to cpu
|
45 |
+
device = torch.device('cpu')
|
46 |
+
|
47 |
+
|
48 |
+
def atoi(text):
|
49 |
+
return int(text) if text.isdigit() else text
|
50 |
+
|
51 |
+
|
52 |
+
def natural_keys(text):
|
53 |
+
'''
|
54 |
+
alist.sort(key=natural_keys) sorts in human order
|
55 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
56 |
+
(See Toothy's implementation in the comments)
|
57 |
+
'''
|
58 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
59 |
+
|
60 |
+
|
61 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
|
62 |
+
if not os.path.isdir(checkpoint_dir):
|
63 |
+
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
|
64 |
+
|
65 |
+
# there should be only one file
|
66 |
+
if zero_stage <= 2:
|
67 |
+
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
|
68 |
+
elif zero_stage == 3:
|
69 |
+
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
|
70 |
+
|
71 |
+
if not os.path.exists(file):
|
72 |
+
raise FileNotFoundError(f"can't find model states file at '{file}'")
|
73 |
+
|
74 |
+
return file
|
75 |
+
|
76 |
+
|
77 |
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
|
78 |
+
# XXX: need to test that this simple glob rule works for multi-node setup too
|
79 |
+
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
|
80 |
+
|
81 |
+
if len(ckpt_files) == 0:
|
82 |
+
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
|
83 |
+
|
84 |
+
return ckpt_files
|
85 |
+
|
86 |
+
|
87 |
+
def get_optim_files(checkpoint_dir):
|
88 |
+
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
89 |
+
|
90 |
+
|
91 |
+
def get_model_state_files(checkpoint_dir):
|
92 |
+
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
93 |
+
|
94 |
+
|
95 |
+
def parse_model_states(files):
|
96 |
+
zero_model_states = []
|
97 |
+
for file in files:
|
98 |
+
state_dict = torch.load(file, map_location=device)
|
99 |
+
|
100 |
+
if BUFFER_NAMES not in state_dict:
|
101 |
+
raise ValueError(f"{file} is not a model state checkpoint")
|
102 |
+
buffer_names = state_dict[BUFFER_NAMES]
|
103 |
+
if debug:
|
104 |
+
print("Found buffers:", buffer_names)
|
105 |
+
|
106 |
+
# recover just the buffers while restoring them to fp32 if they were saved in fp16
|
107 |
+
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
|
108 |
+
param_shapes = state_dict[PARAM_SHAPES]
|
109 |
+
|
110 |
+
# collect parameters that are included in param_shapes
|
111 |
+
param_names = []
|
112 |
+
for s in param_shapes:
|
113 |
+
for name in s.keys():
|
114 |
+
param_names.append(name)
|
115 |
+
|
116 |
+
# update with frozen parameters
|
117 |
+
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
|
118 |
+
if frozen_param_shapes is not None:
|
119 |
+
if debug:
|
120 |
+
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
|
121 |
+
param_names += list(frozen_param_shapes.keys())
|
122 |
+
|
123 |
+
# handle shared params
|
124 |
+
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
|
125 |
+
|
126 |
+
ds_version = state_dict.get(DS_VERSION, None)
|
127 |
+
|
128 |
+
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
|
129 |
+
|
130 |
+
z_model_state = zero_model_state(buffers=buffers,
|
131 |
+
param_shapes=param_shapes,
|
132 |
+
shared_params=shared_params,
|
133 |
+
ds_version=ds_version,
|
134 |
+
frozen_param_shapes=frozen_param_shapes,
|
135 |
+
frozen_param_fragments=frozen_param_fragments)
|
136 |
+
zero_model_states.append(z_model_state)
|
137 |
+
|
138 |
+
return zero_model_states
|
139 |
+
|
140 |
+
|
141 |
+
def parse_optim_states(files, ds_checkpoint_dir):
|
142 |
+
|
143 |
+
total_files = len(files)
|
144 |
+
state_dicts = []
|
145 |
+
for f in files:
|
146 |
+
state_dicts.append(torch.load(f, map_location=device))
|
147 |
+
|
148 |
+
if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
|
149 |
+
raise ValueError(f"{files[0]} is not a zero checkpoint")
|
150 |
+
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
|
151 |
+
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
|
152 |
+
|
153 |
+
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
|
154 |
+
# parameters can be different from data parallelism for non-expert parameters. So we can just
|
155 |
+
# use the max of the partition_count to get the dp world_size.
|
156 |
+
|
157 |
+
if type(world_size) is list:
|
158 |
+
world_size = max(world_size)
|
159 |
+
|
160 |
+
if world_size != total_files:
|
161 |
+
raise ValueError(
|
162 |
+
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
|
163 |
+
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
|
164 |
+
)
|
165 |
+
|
166 |
+
# the groups are named differently in each stage
|
167 |
+
if zero_stage <= 2:
|
168 |
+
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
|
169 |
+
elif zero_stage == 3:
|
170 |
+
fp32_groups_key = FP32_FLAT_GROUPS
|
171 |
+
else:
|
172 |
+
raise ValueError(f"unknown zero stage {zero_stage}")
|
173 |
+
|
174 |
+
if zero_stage <= 2:
|
175 |
+
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
|
176 |
+
elif zero_stage == 3:
|
177 |
+
# if there is more than one param group, there will be multiple flattened tensors - one
|
178 |
+
# flattened tensor per group - for simplicity merge them into a single tensor
|
179 |
+
#
|
180 |
+
# XXX: could make the script more memory efficient for when there are multiple groups - it
|
181 |
+
# will require matching the sub-lists of param_shapes for each param group flattened tensor
|
182 |
+
|
183 |
+
fp32_flat_groups = [
|
184 |
+
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
|
185 |
+
]
|
186 |
+
|
187 |
+
return zero_stage, world_size, fp32_flat_groups
|
188 |
+
|
189 |
+
|
190 |
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
|
191 |
+
"""
|
192 |
+
Returns fp32 state_dict reconstructed from ds checkpoint
|
193 |
+
|
194 |
+
Args:
|
195 |
+
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
|
196 |
+
|
197 |
+
"""
|
198 |
+
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
|
199 |
+
|
200 |
+
optim_files = get_optim_files(ds_checkpoint_dir)
|
201 |
+
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
|
202 |
+
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
|
203 |
+
|
204 |
+
model_files = get_model_state_files(ds_checkpoint_dir)
|
205 |
+
|
206 |
+
zero_model_states = parse_model_states(model_files)
|
207 |
+
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
|
208 |
+
|
209 |
+
if zero_stage <= 2:
|
210 |
+
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
|
211 |
+
elif zero_stage == 3:
|
212 |
+
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
|
213 |
+
|
214 |
+
|
215 |
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
|
216 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
217 |
+
return
|
218 |
+
|
219 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
220 |
+
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
|
221 |
+
|
222 |
+
if debug:
|
223 |
+
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
|
224 |
+
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
225 |
+
|
226 |
+
wanted_params = len(frozen_param_shapes)
|
227 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
228 |
+
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
|
229 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
230 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
231 |
+
|
232 |
+
total_params = 0
|
233 |
+
total_numel = 0
|
234 |
+
for name, shape in frozen_param_shapes.items():
|
235 |
+
total_params += 1
|
236 |
+
unpartitioned_numel = shape.numel()
|
237 |
+
total_numel += unpartitioned_numel
|
238 |
+
|
239 |
+
state_dict[name] = frozen_param_fragments[name]
|
240 |
+
|
241 |
+
if debug:
|
242 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
243 |
+
|
244 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
245 |
+
|
246 |
+
|
247 |
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
|
248 |
+
param_shapes = zero_model_states[0].param_shapes
|
249 |
+
|
250 |
+
# Reconstruction protocol:
|
251 |
+
#
|
252 |
+
# XXX: document this
|
253 |
+
|
254 |
+
if debug:
|
255 |
+
for i in range(world_size):
|
256 |
+
for j in range(len(fp32_flat_groups[0])):
|
257 |
+
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
|
258 |
+
|
259 |
+
# XXX: memory usage doubles here (zero2)
|
260 |
+
num_param_groups = len(fp32_flat_groups[0])
|
261 |
+
merged_single_partition_of_fp32_groups = []
|
262 |
+
for i in range(num_param_groups):
|
263 |
+
merged_partitions = [sd[i] for sd in fp32_flat_groups]
|
264 |
+
full_single_fp32_vector = torch.cat(merged_partitions, 0)
|
265 |
+
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
|
266 |
+
avail_numel = sum(
|
267 |
+
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
|
268 |
+
|
269 |
+
if debug:
|
270 |
+
wanted_params = sum([len(shapes) for shapes in param_shapes])
|
271 |
+
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
|
272 |
+
# not asserting if there is a mismatch due to possible padding
|
273 |
+
print(f"Have {avail_numel} numels to process.")
|
274 |
+
print(f"Need {wanted_numel} numels in {wanted_params} params.")
|
275 |
+
|
276 |
+
# params
|
277 |
+
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
|
278 |
+
# out-of-core computing solution
|
279 |
+
total_numel = 0
|
280 |
+
total_params = 0
|
281 |
+
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
|
282 |
+
offset = 0
|
283 |
+
avail_numel = full_single_fp32_vector.numel()
|
284 |
+
for name, shape in shapes.items():
|
285 |
+
|
286 |
+
unpartitioned_numel = shape.numel()
|
287 |
+
total_numel += unpartitioned_numel
|
288 |
+
total_params += 1
|
289 |
+
|
290 |
+
if debug:
|
291 |
+
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
|
292 |
+
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
|
293 |
+
offset += unpartitioned_numel
|
294 |
+
|
295 |
+
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
|
296 |
+
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
|
297 |
+
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
|
298 |
+
# live optimizer object, so we are checking that the numbers are within the right range
|
299 |
+
align_to = 2 * world_size
|
300 |
+
|
301 |
+
def zero2_align(x):
|
302 |
+
return align_to * math.ceil(x / align_to)
|
303 |
+
|
304 |
+
if debug:
|
305 |
+
print(f"original offset={offset}, avail_numel={avail_numel}")
|
306 |
+
|
307 |
+
offset = zero2_align(offset)
|
308 |
+
avail_numel = zero2_align(avail_numel)
|
309 |
+
|
310 |
+
if debug:
|
311 |
+
print(f"aligned offset={offset}, avail_numel={avail_numel}")
|
312 |
+
|
313 |
+
# Sanity check
|
314 |
+
if offset != avail_numel:
|
315 |
+
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
|
316 |
+
|
317 |
+
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
|
318 |
+
|
319 |
+
|
320 |
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
|
321 |
+
state_dict = OrderedDict()
|
322 |
+
|
323 |
+
# buffers
|
324 |
+
buffers = zero_model_states[0].buffers
|
325 |
+
state_dict.update(buffers)
|
326 |
+
if debug:
|
327 |
+
print(f"added {len(buffers)} buffers")
|
328 |
+
|
329 |
+
_zero2_merge_frozen_params(state_dict, zero_model_states)
|
330 |
+
|
331 |
+
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
|
332 |
+
|
333 |
+
# recover shared parameters
|
334 |
+
for pair in zero_model_states[0].shared_params:
|
335 |
+
if pair[1] in state_dict:
|
336 |
+
state_dict[pair[0]] = state_dict[pair[1]]
|
337 |
+
|
338 |
+
return state_dict
|
339 |
+
|
340 |
+
|
341 |
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
|
342 |
+
remainder = unpartitioned_numel % world_size
|
343 |
+
padding_numel = (world_size - remainder) if remainder else 0
|
344 |
+
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
|
345 |
+
return partitioned_numel, padding_numel
|
346 |
+
|
347 |
+
|
348 |
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
|
349 |
+
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
|
350 |
+
return
|
351 |
+
|
352 |
+
if debug:
|
353 |
+
for i in range(world_size):
|
354 |
+
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
|
355 |
+
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
|
356 |
+
|
357 |
+
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
|
358 |
+
wanted_params = len(frozen_param_shapes)
|
359 |
+
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
|
360 |
+
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
|
361 |
+
print(f'Frozen params: Have {avail_numel} numels to process.')
|
362 |
+
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
|
363 |
+
|
364 |
+
total_params = 0
|
365 |
+
total_numel = 0
|
366 |
+
for name, shape in zero_model_states[0].frozen_param_shapes.items():
|
367 |
+
total_params += 1
|
368 |
+
unpartitioned_numel = shape.numel()
|
369 |
+
total_numel += unpartitioned_numel
|
370 |
+
|
371 |
+
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
|
372 |
+
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
|
373 |
+
|
374 |
+
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
|
375 |
+
|
376 |
+
if debug:
|
377 |
+
print(
|
378 |
+
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
|
379 |
+
)
|
380 |
+
|
381 |
+
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
382 |
+
|
383 |
+
|
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")

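# Illustrative sketch of the zip-merge above (not part of the original script):
# assume world_size=2 and a single param of shape (3,), so partitioned_numel=2
# with 1 padding element. Each rank's flat group holds its 2-element slice:
#
#   rank0 slice: [a, b]   rank1 slice: [c, pad]
#   torch.cat -> [a, b, c, pad] -> .narrow(0, 0, 3) -> [a, b, c] -> .view(3)
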
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict

def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)

def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)

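# Illustrative usage for the checkpoints in this repository (the script ships
# inside each checkpoint folder, e.g. checkpoint-3000, which also contains the
# ``latest`` file and the ``pytorch_model`` tag folder). Run from inside that
# folder, as the header comment's example shows:
#
#   python zero_to_fp32.py . pytorch_model.bin
#
# or, from Python:
#
#   convert_zero_checkpoint_to_fp32_state_dict("checkpoint-3000", "pytorch_model.bin")
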
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
    context of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model

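# Note (illustrative comment, not part of the original script): strict=False
# above tolerates keys that exist in the model but were not captured in the
# zero checkpoint (and vice versa), so loading does not fail on such mismatches.
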
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
checkpoint-3000/latest
ADDED
@@ -0,0 +1 @@
pytorch_model

checkpoint-3000/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1540bd304fc1fa00f9690efc95b38daff488a79ddd33024bbe7a935da167fbc5
size 832858243

checkpoint-3000/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2bc7b0b4544b4d08187d1bce3902145bde2da20face446ae600d86c3a94bada4
size 455356505

checkpoint-3000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aefb8ecbaecc6f899ce3b3b66c52dea36305ceaea99f466169cb72f70d330170
size 15755

checkpoint-3000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2056b541ceef5d77999a48951b85854f6503b83d58bc070e970ec3531e56e169
size 563
checkpoint-3000/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
[identical 578-line copy of zero_to_fp32.py, same as in the earlier checkpoint folders]
checkpoint-500/latest
ADDED
@@ -0,0 +1 @@
pytorch_model

checkpoint-500/pytorch_model/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e83372cb2014f858af70e5b6a158d0f583edcc5ffb98b6fd530899dccd723c5
size 832858243

checkpoint-500/pytorch_model/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:014b8653f62286ebe22069609180b4a502045fda8567b48949499159b595ee8f
size 455356505

checkpoint-500/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bcf27f1e4bac82c022e7429db45d3cb7df81a13c16f385ae355c3c09020be84
size 15755

checkpoint-500/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:478ff7b9adcb1fba8f74abd29e0dfb41133888ecf608d07ea8ae2143e019d098
size 563
checkpoint-500/zero_to_fp32.py
ADDED
@@ -0,0 +1,578 @@
[identical 578-line copy of zero_to_fp32.py, same as in the earlier checkpoint folders]
|
417 |
+
|
418 |
+
if debug:
|
419 |
+
print(
|
420 |
+
f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
|
421 |
+
)
|
422 |
+
|
423 |
+
# XXX: memory usage doubles here
|
424 |
+
state_dict[name] = torch.cat(
|
425 |
+
tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
|
426 |
+
0).narrow(0, 0, unpartitioned_numel).view(shape)
|
427 |
+
offset += partitioned_numel
|
428 |
+
|
429 |
+
offset *= world_size
|
430 |
+
|
431 |
+
# Sanity check
|
432 |
+
if offset != avail_numel:
|
433 |
+
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
|
434 |
+
|
435 |
+
print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
|
436 |
+
|
437 |
+
|
438 |
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
|
439 |
+
state_dict = OrderedDict()
|
440 |
+
|
441 |
+
# buffers
|
442 |
+
buffers = zero_model_states[0].buffers
|
443 |
+
state_dict.update(buffers)
|
444 |
+
if debug:
|
445 |
+
print(f"added {len(buffers)} buffers")
|
446 |
+
|
447 |
+
_zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
|
448 |
+
|
449 |
+
_zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
|
450 |
+
|
451 |
+
# recover shared parameters
|
452 |
+
for pair in zero_model_states[0].shared_params:
|
453 |
+
if pair[1] in state_dict:
|
454 |
+
state_dict[pair[0]] = state_dict[pair[1]]
|
455 |
+
|
456 |
+
return state_dict
|
457 |
+
|
458 |
+
|
459 |
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
|
460 |
+
"""
|
461 |
+
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
|
462 |
+
``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
|
463 |
+
via a model hub.
|
464 |
+
|
465 |
+
Args:
|
466 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder
|
467 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
|
468 |
+
|
469 |
+
Returns:
|
470 |
+
- pytorch ``state_dict``
|
471 |
+
|
472 |
+
Note: this approach may not work if your application doesn't have sufficient free CPU memory and
|
473 |
+
you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
|
474 |
+
the checkpoint.
|
475 |
+
|
476 |
+
A typical usage might be ::
|
477 |
+
|
478 |
+
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
|
479 |
+
# do the training and checkpoint saving
|
480 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
|
481 |
+
model = model.cpu() # move to cpu
|
482 |
+
model.load_state_dict(state_dict)
|
483 |
+
# submit to model hub or save the model to share with others
|
484 |
+
|
485 |
+
In this example the ``model`` will no longer be usable in the deepspeed context of the same
|
486 |
+
application. i.e. you will need to re-initialize the deepspeed engine, since
|
487 |
+
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
|
488 |
+
|
489 |
+
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
|
490 |
+
|
491 |
+
"""
|
492 |
+
if tag is None:
|
493 |
+
latest_path = os.path.join(checkpoint_dir, 'latest')
|
494 |
+
if os.path.isfile(latest_path):
|
495 |
+
with open(latest_path, 'r') as fd:
|
496 |
+
tag = fd.read().strip()
|
497 |
+
else:
|
498 |
+
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
|
499 |
+
|
500 |
+
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
|
501 |
+
|
502 |
+
if not os.path.isdir(ds_checkpoint_dir):
|
503 |
+
raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
|
504 |
+
|
505 |
+
return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
|
506 |
+
|
507 |
+
|
508 |
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
|
509 |
+
"""
|
510 |
+
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
|
511 |
+
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
|
512 |
+
|
513 |
+
Args:
|
514 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
|
515 |
+
- ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
|
516 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
|
517 |
+
"""
|
518 |
+
|
519 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
|
520 |
+
print(f"Saving fp32 state dict to {output_file}")
|
521 |
+
torch.save(state_dict, output_file)
|
522 |
+
|
523 |
+
|
524 |
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
|
525 |
+
"""
|
526 |
+
1. Put the provided model to cpu
|
527 |
+
2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
|
528 |
+
3. Load it into the provided model
|
529 |
+
|
530 |
+
Args:
|
531 |
+
- ``model``: the model object to update
|
532 |
+
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
|
533 |
+
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
|
534 |
+
|
535 |
+
Returns:
|
536 |
+
- ``model`: modified model
|
537 |
+
|
538 |
+
Make sure you have plenty of CPU memory available before you call this function. If you don't
|
539 |
+
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
|
540 |
+
conveniently placed for you in the checkpoint folder.
|
541 |
+
|
542 |
+
A typical usage might be ::
|
543 |
+
|
544 |
+
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
|
545 |
+
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
|
546 |
+
# submit to model hub or save the model to share with others
|
547 |
+
|
548 |
+
Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
|
549 |
+
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
|
550 |
+
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
|
551 |
+
|
552 |
+
"""
|
553 |
+
logger.info(f"Extracting fp32 weights")
|
554 |
+
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
|
555 |
+
|
556 |
+
logger.info(f"Overwriting model with fp32 weights")
|
557 |
+
model = model.cpu()
|
558 |
+
model.load_state_dict(state_dict, strict=False)
|
559 |
+
|
560 |
+
return model
|
561 |
+
|
562 |
+
|
563 |
+
if __name__ == "__main__":
|
564 |
+
|
565 |
+
parser = argparse.ArgumentParser()
|
566 |
+
parser.add_argument("checkpoint_dir",
|
567 |
+
type=str,
|
568 |
+
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
|
569 |
+
parser.add_argument(
|
570 |
+
"output_file",
|
571 |
+
type=str,
|
572 |
+
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
|
573 |
+
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
|
574 |
+
args = parser.parse_args()
|
575 |
+
|
576 |
+
debug = args.debug
|
577 |
+
|
578 |
+
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
|
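For reference, a minimal usage sketch of the script above; the checkpoint folder and output filename here are illustrative, not part of this commit:

    # Command-line form, per the argparse block above:
    #   python zero_to_fp32.py checkpoint-1000 pytorch_model_fp32.bin
    # Programmatic form, via the same entry point:
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    # Reads <checkpoint_dir>/latest to find the tag folder, merges the ZeRO
    # shards into a single fp32 state_dict, and writes it with torch.save().
    convert_zero_checkpoint_to_fp32_state_dict("checkpoint-1000", "pytorch_model_fp32.bin")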
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
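A sketch of how this config is consumed, assuming the repo id Govern/textual_inversion_airplane; subfolder matches the file path above:

    from transformers import CLIPImageProcessor

    # Loads the preprocessor_config.json shown above; "image_processor_type"
    # selects the CLIPImageProcessor class.
    processor = CLIPImageProcessor.from_pretrained(
        "Govern/textual_inversion_airplane", subfolder="feature_extractor")
    print(processor.size)  # {'shortest_edge': 224}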
learned_embeds-steps-1000.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08b7f108bc591e21d38db2e23a3c9b206e1ee28175fb694b270efcf8c12c245c
+size 4001
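The three lines above are a Git LFS pointer, not the tensor itself; the Hub resolves the oid/size to the real blob on download. A sketch, assuming the same repo id:

    from huggingface_hub import hf_hub_download

    # Downloads the actual binary that the LFS pointer references.
    path = hf_hub_download("Govern/textual_inversion_airplane",
                           "learned_embeds-steps-1000.bin")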
learned_embeds-steps-1500.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6168c8e061aa6705c0c1dcab11636acd4487b81ef0e7e6903ab7e92162c0616
+size 4001
learned_embeds-steps-2000.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:139e4e81ba1f577c279cb1d667480f2effbd7fc86d23936cf89e087e94b2bde6
+size 4001
learned_embeds-steps-2500.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbef87c1638c0bb61b61d7db277ff3dd5114d6de7cdffbe0d518f3b5a48f46d2
+size 4001
learned_embeds-steps-3000.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c21f4e4dea3524c8153095882ca933b6bf4fef21e7e7fc1e958ac1f00bdb090
+size 4001
learned_embeds-steps-500.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16769dca6ba91a0ecce3472643a41d32a52ade9c431cfa241338d153a8033473
+size 3998
learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00a2916ba2a11edb240caffdfee59602f5a61fc9814bcf5d6fd900cc408a0fe5
+size 3840
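learned_embeds.bin holds the final embedding (the step-numbered files above are intermediate saves). A sketch of using it for inference, assuming the repo id Govern/textual_inversion_airplane and the <airplane> placeholder token recorded in hparams.yml below:

    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
    # Registers <airplane> in the tokenizer and injects the learned vector
    # (the .bin is a {placeholder_token: tensor} dict) into the text encoder.
    pipe.load_textual_inversion("Govern/textual_inversion_airplane")
    image = pipe("an aerial photo of <airplane>").images[0]
    image.save("airplane.png")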
logs/textual_inversion/1695306969.1297772/events.out.tfevents.1695306969.90fb41ce5dc1.2595613.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:592534adefdf07bda65cfb324edf596a04f18e5d8671655e6b7e603f6472d644
+size 2299
logs/textual_inversion/1695306969.1304317/hparams.yml
ADDED
@@ -0,0 +1,46 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+center_crop: false
+checkpointing_steps: 500
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: false
+hub_model_id: null
+hub_token: null
+initializer_token: plane
+learnable_property: style
+learning_rate: 0.002
+local_rank: 0
+logging_dir: logs
+lr_num_cycles: 1
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_train_steps: 3000
+mixed_precision: 'no'
+num_train_epochs: 2
+num_validation_images: 4
+num_vectors: 1
+output_dir: textual_inversion_airplane
+placeholder_token: <airplane>
+pretrained_model_name_or_path: runwayml/stable-diffusion-v1-5
+push_to_hub: true
+repeats: 100
+report_to: tensorboard
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+save_as_full_pipeline: false
+save_steps: 500
+scale_lr: true
+seed: null
+tokenizer_name: null
+train_batch_size: 1
+train_data_dir: /workspace/Remote_sensor/airplane_png
+validation_epochs: null
+validation_prompt: null
+validation_steps: 100
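Note that with scale_lr: true the learning_rate above is a base value; assuming the scaling rule used by diffusers' textual inversion example script (base_lr * grad_accum_steps * batch_size * num_processes), the rate actually used for the optimizer works out as:

    # Effective learning rate under scale_lr, a sketch with assumed scaling rule:
    base_lr = 0.002        # learning_rate
    grad_accum = 4         # gradient_accumulation_steps
    batch_size = 1         # train_batch_size
    num_processes = 1      # assumed single GPU; not recorded in hparams.yml
    print(base_lr * grad_accum * batch_size * num_processes)  # 0.008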
logs/textual_inversion/events.out.tfevents.1695306969.90fb41ce5dc1.2595613.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaba644d3a1b5eb5af96f415726bdb20d5a252e5c09e5b0edd0e7ba170fbb455
+size 251918
model_index.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.18.2",
+  "feature_extractor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    "stable_diffusion",
+    "StableDiffusionSafetyChecker"
+  ],
+  "scheduler": [
+    "diffusers",
+    "PNDMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
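model_index.json is what lets DiffusionPipeline assemble the repo: each key names a subfolder and the (library, class) pair that loads it. A sketch, assuming the remaining component subfolders (unet, vae, text_encoder, tokenizer, scheduler) were also pushed; this diff view is truncated to 50 files, so that is not verifiable here:

    from diffusers import DiffusionPipeline

    # "_class_name" resolves to StableDiffusionPipeline; each component is
    # loaded from its subfolder with the class named in model_index.json.
    pipe = DiffusionPipeline.from_pretrained("Govern/textual_inversion_airplane")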
safety_checker/config.json
ADDED
@@ -0,0 +1,168 @@
+{
+  "_commit_hash": "1d0c4ebf6ff58a5caecab40fa1406526bca4b5b9",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/1d0c4ebf6ff58a5caecab40fa1406526bca4b5b9/safety_checker",
+  "architectures": [
+    "StableDiffusionSafetyChecker"
+  ],
+  "initializer_factor": 1.0,
+  "logit_scale_init_value": 2.6592,
+  "model_type": "clip",
+  "projection_dim": 768,
+  "text_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 49406,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 49407,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 77,
+    "min_length": 0,
+    "model_type": "clip_text_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.31.0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 49408
+  },
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 16,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.31.0",
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  }
+}
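The vision_config above describes the CLIP ViT-L/14 tower (hidden size 1024, 24 layers, patch size 14) that the safety checker uses to screen generated images. A sketch of loading it standalone, assuming the repo id Govern/textual_inversion_airplane:

    from diffusers.pipelines.stable_diffusion.safety_checker import (
        StableDiffusionSafetyChecker)

    # Instantiates the checker from the config and weights in this subfolder.
    checker = StableDiffusionSafetyChecker.from_pretrained(
        "Govern/textual_inversion_airplane", subfolder="safety_checker")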