Wauplin (HF staff) committed
Commit f562984
1 Parent(s): 6db45d7

Delete checkpoint_merger.py

Files changed (1)
  1. checkpoint_merger.py +0 -287
checkpoint_merger.py DELETED
@@ -1,287 +0,0 @@
import glob
import os
from typing import Dict, List, Union

import safetensors.torch
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.utils import validate_hf_hub_args

from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME


class CheckpointMergerPipeline(DiffusionPipeline):
    """
    A class that supports merging diffusion models based on the discussion here:
    https://github.com/huggingface/diffusers/issues/877

    Example usage:

    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")

    merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4", "prompthero/openjourney"], interp="inv_sigmoid", alpha=0.8, force=True)

    merged_pipe.to("cuda")

    prompt = "An astronaut riding a unicycle on Mars"

    results = merged_pipe(prompt)

    For more details, see the docstring for the merge method.
    """

    def __init__(self):
        self.register_to_config()
        super().__init__()

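    # Note: two model_index.json configs are treated as compatible when they are identical, or when they
    # differ only in meta keys (keys starting with "_"), which _remove_meta_keys strips before the comparison below.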
    def _compare_model_configs(self, dict0, dict1):
        if dict0 == dict1:
            return True
        else:
            config0, meta_keys0 = self._remove_meta_keys(dict0)
            config1, meta_keys1 = self._remove_meta_keys(dict1)
            if config0 == config1:
                print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
                return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith("_"):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    @validate_hf_hub_args
    def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
        """
        Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints (weights) of the models
        passed as a list in the argument 'pretrained_model_name_or_path_list'.

        Parameters:
        -----------
            pretrained_model_name_or_path_list : A list of valid pretrained model names on the Hugging Face Hub or paths to locally stored models in the Hugging Face format.

            **kwargs:
                Supports all the default DiffusionPipeline.get_config_dict kwargs, viz.

                cache_dir, resume_download, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.

                alpha - The interpolation parameter. Ranges from 0 to 1. It controls the ratio in which the checkpoints are merged:
                    with alpha = 0.8 the first model's checkpoints contribute far less to the final result than with alpha = 0.2.

                interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
                    Passing None uses the default interpolation, which is weighted-sum interpolation. For merging three checkpoints, only "add_diff" is supported.

                force - Whether to ignore mismatches in model_index.json for the given models. Defaults to False.

                variant - Which variant of a pretrained model to load, e.g. "fp16". Defaults to None.
        """
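        # Illustrative calls (model ids are placeholders, not from this file), assuming the models share a
        # compatible model_index.json:
        #   pipe.merge(["org/model-a", "org/model-b"], alpha=0.3)  # plain weighted sum
        #   pipe.merge(["org/model-a", "org/model-b", "org/model-c"], interp="add_diff", alpha=0.5)
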
        # Default kwargs from DiffusionPipeline
        cache_dir = kwargs.pop("cache_dir", None)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        variant = kwargs.pop("variant", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        device_map = kwargs.pop("device_map", None)

        alpha = kwargs.pop("alpha", 0.5)
        interp = kwargs.pop("interp", None)

        print("Received list", pretrained_model_name_or_path_list)
        print(f"Combining with alpha={alpha}, interpolation mode={interp}")

        checkpoint_count = len(pretrained_model_name_or_path_list)
        # Ignore result from model_index.json comparison of the two checkpoints
        force = kwargs.pop("force", False)

        # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
        if checkpoint_count > 3 or checkpoint_count < 2:
            raise ValueError(
                "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
                " passed."
            )

        print("Received the right number of checkpoints")
        # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
        # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None

        # Validate that the checkpoints can be merged
        # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
            )
            config_dicts.append(config_dict)

        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
            if not force and comparison_result is False:
                raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
        print("Compatible model_index.json files found")
        # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
        cached_folders = []
        for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [
                WEIGHTS_NAME,
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                ONNX_WEIGHTS_NAME,
                DiffusionPipeline.config_name,
            ]
            requested_pipeline_class = config_dict.get("_class_name")
            user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}

            cached_folder = (
                pretrained_model_name_or_path
                if os.path.isdir(pretrained_model_name_or_path)
                else snapshot_download(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    allow_patterns=allow_patterns,
                    user_agent=user_agent,
                )
            )
            print("Cached Folder", cached_folder)
            cached_folders.append(cached_folder)

        # Step 3:
        # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
        final_pipe = DiffusionPipeline.from_pretrained(
            cached_folders[0],
            torch_dtype=torch_dtype,
            device_map=device_map,
            variant=variant,
        )
        final_pipe.to(self.device)

        checkpoint_path_2 = None
        if len(cached_folders) > 2:
            checkpoint_path_2 = os.path.join(cached_folders[2])

        if interp == "sigmoid":
            theta_func = CheckpointMergerPipeline.sigmoid
        elif interp == "inv_sigmoid":
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif interp == "add_diff":
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum
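        # Note: theta_func is applied tensor-by-tensor in the loop below; theta2 is only used by add_difference,
        # the other interpolation functions accept it but ignore it.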

        # Find each module's state dict.
        for attr in final_pipe.config.keys():
            if not attr.startswith("_"):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = [
                        *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
                        *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
                    ]
                    checkpoint_path_1 = files[0] if len(files) > 0 else None
                if len(cached_folders) < 3:
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = [
                            *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
                            *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
                        ]
                        checkpoint_path_2 = files[0] if len(files) > 0 else None
                # For an attr, if both checkpoint_path_1 and 2 are None, ignore.
                # If at least one is present, deal with it according to the interp method, of course only if the state_dict keys match.
                if checkpoint_path_1 is None and checkpoint_path_2 is None:
                    print(f"Skipping {attr}: not present in 2nd or 3rd model")
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(module, bool):  # ignore requires_safety_checker boolean
                        continue
                    theta_0 = getattr(module, "state_dict")
                    theta_0 = theta_0()

                    update_theta_0 = getattr(module, "load_state_dict")
                    theta_1 = (
                        safetensors.torch.load_file(checkpoint_path_1)
                        if (checkpoint_path_1.endswith(".safetensors"))
                        else torch.load(checkpoint_path_1, map_location="cpu")
                    )
                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (
                            safetensors.torch.load_file(checkpoint_path_2)
                            if (checkpoint_path_2.endswith(".safetensors"))
                            else torch.load(checkpoint_path_2, map_location="cpu")
                        )

                    if not theta_0.keys() == theta_1.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                    if theta_2 and not theta_1.keys() == theta_2.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                except Exception as e:
                    print(f"Skipping {attr} due to an unexpected error: {str(e)}")
                    continue
                print(f"MERGING {attr}")

                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
                    else:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)

                del theta_1
                del theta_2
                update_theta_0(theta_0)

                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        return ((1 - alpha) * theta0) + (alpha * theta1)

    # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        alpha = alpha * alpha * (3 - (2 * alpha))
        return theta0 + ((theta1 - theta0) * alpha)

    # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        import math

        alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
        return theta0 + ((theta1 - theta0) * alpha)

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        return theta0 + (theta1 - theta2) * (1.0 - alpha)
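
For reference, a minimal usage sketch following the class docstring above (the model ids and the local checkpoint_merger.py path are illustrative, and the .images[0] access assumes the merged result is a Stable Diffusion pipeline):

    from diffusers import DiffusionPipeline

    # Load one of the models as the base pipeline, pointing custom_pipeline at a local copy of checkpoint_merger.py
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="checkpoint_merger.py",
    )

    # Two-model merge: interp=None falls back to a plain weighted sum with the given alpha
    merged_pipe = pipe.merge(
        ["CompVis/stable-diffusion-v1-4", "prompthero/openjourney"],
        alpha=0.3,
    )

    # Three-model merge: only "add_diff" is supported for three checkpoints
    # merged_pipe = pipe.merge(["org/model-a", "org/model-b", "org/model-c"], interp="add_diff", alpha=0.5)

    merged_pipe.to("cuda")
    image = merged_pipe("An astronaut riding a unicycle on Mars").images[0]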