sayakpaul (HF staff) committed on
Commit
afcb522
1 Parent(s): 8352d5a

Upload train_with_fixes.py with huggingface_hub

Files changed (1)
  1. train_with_fixes.py +1700 -0
train_with_fixes.py ADDED
@@ -0,0 +1,1700 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
+
16
+ import argparse
17
+ import gc
18
+ import itertools
19
+ import logging
20
+ import math
21
+ import os
22
+ import shutil
23
+ import warnings
24
+ from pathlib import Path
25
+
26
+ import numpy as np
27
+ import torch
28
+ import torch.nn.functional as F
29
+ import torch.utils.checkpoint
30
+ import transformers
31
+ from accelerate import Accelerator
32
+ from accelerate.logging import get_logger
33
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
34
+ from huggingface_hub import create_repo, upload_folder
35
+ from huggingface_hub.utils import insecure_hashlib
36
+ from packaging import version
37
+ from peft import LoraConfig
38
+ from peft.utils import get_peft_model_state_dict
39
+ from PIL import Image
40
+ from PIL.ImageOps import exif_transpose
41
+ from torch.utils.data import Dataset
42
+ from torchvision import transforms
43
+ from tqdm.auto import tqdm
44
+ from transformers import AutoTokenizer, PretrainedConfig
45
+
46
+ import diffusers
47
+ from diffusers import (
48
+ AutoencoderKL,
49
+ DDPMScheduler,
50
+ DPMSolverMultistepScheduler,
51
+ StableDiffusionXLPipeline,
52
+ UNet2DConditionModel,
53
+ )
54
+ from diffusers.loaders import LoraLoaderMixin
55
+ from diffusers.optimization import get_scheduler
56
+ from diffusers.training_utils import compute_snr
57
+ from diffusers.utils import check_min_version, is_wandb_available
58
+ from diffusers.utils.import_utils import is_xformers_available
59
+
60
+
61
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
62
+ check_min_version("0.25.0.dev0")
63
+
64
+ logger = get_logger(__name__)
65
+
66
+
67
+ def save_model_card(
68
+ repo_id: str,
69
+ images=None,
70
+ base_model=str,
71
+ train_text_encoder=False,
72
+ instance_prompt=str,
73
+ validation_prompt=str,
74
+ repo_folder=None,
75
+ vae_path=None,
76
+ ):
77
+ img_str = "widget:\n" if images else ""
78
+ for i, image in enumerate(images):
79
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
80
+ img_str += f"""
81
+ - text: '{validation_prompt if validation_prompt else ' ' }'
82
+ output:
83
+ url:
84
+ "image_{i}.png"
85
+ """
86
+
87
+ yaml = f"""
88
+ ---
89
+ tags:
90
+ - stable-diffusion-xl
91
+ - stable-diffusion-xl-diffusers
92
+ - text-to-image
93
+ - diffusers
94
+ - lora
95
+ - template:sd-lora
96
+ {img_str}
97
+ base_model: {base_model}
98
+ instance_prompt: {instance_prompt}
99
+ license: openrail++
100
+ ---
101
+ """
102
+
103
+ model_card = f"""
104
+ # SDXL LoRA DreamBooth - {repo_id}
105
+
106
+ <Gallery />
107
+
108
+ ## Model description
109
+
110
+ These are {repo_id} LoRA adaptation weights for {base_model}.
111
+
112
+ The weights were trained using [DreamBooth](https://dreambooth.github.io/).
113
+
114
+ LoRA for the text encoder was enabled: {train_text_encoder}.
115
+
116
+ Special VAE used for training: {vae_path}.
117
+
118
+ ## Trigger words
119
+
120
+ You should use {instance_prompt} to trigger the image generation.
121
+
122
+ ## Download model
123
+
124
+ Weights for this model are available in Safetensors format.
125
+
126
+ [Download]({repo_id}/tree/main) them in the Files & versions tab.
127
+
128
+ """
129
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
130
+ f.write(yaml + model_card)
131
+
132
+
133
+ def import_model_class_from_model_name_or_path(
134
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
135
+ ):
136
+ text_encoder_config = PretrainedConfig.from_pretrained(
137
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
138
+ )
139
+ model_class = text_encoder_config.architectures[0]
140
+
141
+ if model_class == "CLIPTextModel":
142
+ from transformers import CLIPTextModel
143
+
144
+ return CLIPTextModel
145
+ elif model_class == "CLIPTextModelWithProjection":
146
+ from transformers import CLIPTextModelWithProjection
147
+
148
+ return CLIPTextModelWithProjection
149
+ else:
150
+ raise ValueError(f"{model_class} is not supported.")
151
+
152
+
153
+ def parse_args(input_args=None):
154
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
155
+ parser.add_argument(
156
+ "--pretrained_model_name_or_path",
157
+ type=str,
158
+ default=None,
159
+ required=True,
160
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
161
+ )
162
+ parser.add_argument(
163
+ "--pretrained_vae_model_name_or_path",
164
+ type=str,
165
+ default=None,
166
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
167
+ )
168
+ parser.add_argument(
169
+ "--revision",
170
+ type=str,
171
+ default=None,
172
+ required=False,
173
+ help="Revision of pretrained model identifier from huggingface.co/models.",
174
+ )
175
+ parser.add_argument(
176
+ "--variant",
177
+ type=str,
178
+ default=None,
179
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
180
+ )
181
+ parser.add_argument(
182
+ "--dataset_name",
183
+ type=str,
184
+ default=None,
185
+ help=(
186
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
187
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
188
+ " or to a folder containing files that 🤗 Datasets can understand."
189
+ ),
190
+ )
191
+ parser.add_argument(
192
+ "--dataset_config_name",
193
+ type=str,
194
+ default=None,
195
+ help="The config of the Dataset, leave as None if there's only one config.",
196
+ )
197
+ parser.add_argument(
198
+ "--instance_data_dir",
199
+ type=str,
200
+ default=None,
201
+ help=("A folder containing the training data. "),
202
+ )
203
+
204
+ parser.add_argument(
205
+ "--cache_dir",
206
+ type=str,
207
+ default=None,
208
+ help="The directory where the downloaded models and datasets will be stored.",
209
+ )
210
+
211
+ parser.add_argument(
212
+ "--image_column",
213
+ type=str,
214
+ default="image",
215
+ help="The column of the dataset containing the target image. By "
216
+ "default, the standard Image Dataset maps out 'file_name' "
217
+ "to 'image'.",
218
+ )
219
+ parser.add_argument(
220
+ "--caption_column",
221
+ type=str,
222
+ default=None,
223
+ help="The column of the dataset containing the instance prompt for each image",
224
+ )
225
+
226
+ parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
227
+
228
+ parser.add_argument(
229
+ "--class_data_dir",
230
+ type=str,
231
+ default=None,
232
+ required=False,
233
+ help="A folder containing the training data of class images.",
234
+ )
235
+ parser.add_argument(
236
+ "--instance_prompt",
237
+ type=str,
238
+ default=None,
239
+ required=True,
240
+ help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
241
+ )
242
+ parser.add_argument(
243
+ "--class_prompt",
244
+ type=str,
245
+ default=None,
246
+ help="The prompt to specify images in the same class as provided instance images.",
247
+ )
248
+ parser.add_argument(
249
+ "--validation_prompt",
250
+ type=str,
251
+ default=None,
252
+ help="A prompt that is used during validation to verify that the model is learning.",
253
+ )
254
+ parser.add_argument(
255
+ "--num_validation_images",
256
+ type=int,
257
+ default=4,
258
+ help="Number of images that should be generated during validation with `validation_prompt`.",
259
+ )
260
+ parser.add_argument(
261
+ "--validation_epochs",
262
+ type=int,
263
+ default=50,
264
+ help=(
265
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
266
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
267
+ ),
268
+ )
269
+ parser.add_argument(
270
+ "--with_prior_preservation",
271
+ default=False,
272
+ action="store_true",
273
+ help="Flag to add prior preservation loss.",
274
+ )
275
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
276
+ parser.add_argument(
277
+ "--num_class_images",
278
+ type=int,
279
+ default=100,
280
+ help=(
281
+ "Minimal class images for prior preservation loss. If there are not enough images already present in"
282
+ " class_data_dir, additional images will be sampled with class_prompt."
283
+ ),
284
+ )
285
+ parser.add_argument(
286
+ "--output_dir",
287
+ type=str,
288
+ default="lora-dreambooth-model",
289
+ help="The output directory where the model predictions and checkpoints will be written.",
290
+ )
291
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
292
+ parser.add_argument(
293
+ "--resolution",
294
+ type=int,
295
+ default=1024,
296
+ help=(
297
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
298
+ " resolution"
299
+ ),
300
+ )
301
+ parser.add_argument(
302
+ "--crops_coords_top_left_h",
303
+ type=int,
304
+ default=0,
305
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
306
+ )
307
+ parser.add_argument(
308
+ "--crops_coords_top_left_w",
309
+ type=int,
310
+ default=0,
311
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
312
+ )
313
+ parser.add_argument(
314
+ "--center_crop",
315
+ default=False,
316
+ action="store_true",
317
+ help=(
318
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
319
+ " cropped. The images will be resized to the resolution first before cropping."
320
+ ),
321
+ )
322
+ parser.add_argument(
323
+ "--train_text_encoder",
324
+ action="store_true",
325
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
326
+ )
327
+ parser.add_argument(
328
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
329
+ )
330
+ parser.add_argument(
331
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
332
+ )
333
+ parser.add_argument("--num_train_epochs", type=int, default=1)
334
+ parser.add_argument(
335
+ "--max_train_steps",
336
+ type=int,
337
+ default=None,
338
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
339
+ )
340
+ parser.add_argument(
341
+ "--checkpointing_steps",
342
+ type=int,
343
+ default=500,
344
+ help=(
345
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
346
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
347
+ " training using `--resume_from_checkpoint`."
348
+ ),
349
+ )
350
+ parser.add_argument(
351
+ "--checkpoints_total_limit",
352
+ type=int,
353
+ default=None,
354
+ help=("Max number of checkpoints to store."),
355
+ )
356
+ parser.add_argument(
357
+ "--resume_from_checkpoint",
358
+ type=str,
359
+ default=None,
360
+ help=(
361
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
362
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
363
+ ),
364
+ )
365
+ parser.add_argument(
366
+ "--gradient_accumulation_steps",
367
+ type=int,
368
+ default=1,
369
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
370
+ )
371
+ parser.add_argument(
372
+ "--gradient_checkpointing",
373
+ action="store_true",
374
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
375
+ )
376
+ parser.add_argument(
377
+ "--learning_rate",
378
+ type=float,
379
+ default=1e-4,
380
+ help="Initial learning rate (after the potential warmup period) to use.",
381
+ )
382
+
383
+ parser.add_argument(
384
+ "--text_encoder_lr",
385
+ type=float,
386
+ default=5e-6,
387
+ help="Text encoder learning rate to use.",
388
+ )
389
+ parser.add_argument(
390
+ "--scale_lr",
391
+ action="store_true",
392
+ default=False,
393
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
394
+ )
395
+ parser.add_argument(
396
+ "--lr_scheduler",
397
+ type=str,
398
+ default="constant",
399
+ help=(
400
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
401
+ ' "constant", "constant_with_warmup"]'
402
+ ),
403
+ )
404
+
405
+ parser.add_argument(
406
+ "--snr_gamma",
407
+ type=float,
408
+ default=None,
409
+ help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
410
+ "More details here: https://arxiv.org/abs/2303.09556.",
411
+ )
412
+ parser.add_argument(
413
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
414
+ )
415
+ parser.add_argument(
416
+ "--lr_num_cycles",
417
+ type=int,
418
+ default=1,
419
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
420
+ )
421
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
422
+ parser.add_argument(
423
+ "--dataloader_num_workers",
424
+ type=int,
425
+ default=0,
426
+ help=(
427
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
428
+ ),
429
+ )
430
+
431
+ parser.add_argument(
432
+ "--optimizer",
433
+ type=str,
434
+ default="AdamW",
435
+ help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
436
+ )
437
+
438
+ parser.add_argument(
439
+ "--use_8bit_adam",
440
+ action="store_true",
441
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
442
+ )
443
+
444
+ parser.add_argument(
445
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
446
+ )
447
+ parser.add_argument(
448
+ "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
449
+ )
450
+ parser.add_argument(
451
+ "--prodigy_beta3",
452
+ type=float,
453
+ default=None,
454
+ help="coefficients for computing the Prodidy stepsize using running averages. If set to None, "
455
+ "uses the value of square root of beta2. Ignored if optimizer is adamW",
456
+ )
457
+ parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
458
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
459
+ parser.add_argument(
460
+ "--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder"
461
+ )
462
+
463
+ parser.add_argument(
464
+ "--adam_epsilon",
465
+ type=float,
466
+ default=1e-08,
467
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
468
+ )
469
+
470
+ parser.add_argument(
471
+ "--prodigy_use_bias_correction",
472
+ type=bool,
473
+ default=True,
474
+ help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
475
+ )
476
+ parser.add_argument(
477
+ "--prodigy_safeguard_warmup",
478
+ type=bool,
479
+ default=True,
480
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
481
+ "Ignored if optimizer is adamW",
482
+ )
483
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
484
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
485
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
486
+ parser.add_argument(
487
+ "--hub_model_id",
488
+ type=str,
489
+ default=None,
490
+ help="The name of the repository to keep in sync with the local `output_dir`.",
491
+ )
492
+ parser.add_argument(
493
+ "--logging_dir",
494
+ type=str,
495
+ default="logs",
496
+ help=(
497
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
498
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
499
+ ),
500
+ )
501
+ parser.add_argument(
502
+ "--allow_tf32",
503
+ action="store_true",
504
+ help=(
505
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
506
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
507
+ ),
508
+ )
509
+ parser.add_argument(
510
+ "--report_to",
511
+ type=str,
512
+ default="tensorboard",
513
+ help=(
514
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
515
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
516
+ ),
517
+ )
518
+ parser.add_argument(
519
+ "--mixed_precision",
520
+ type=str,
521
+ default=None,
522
+ choices=["no", "fp16", "bf16"],
523
+ help=(
524
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
525
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
526
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
527
+ ),
528
+ )
529
+ parser.add_argument(
530
+ "--prior_generation_precision",
531
+ type=str,
532
+ default=None,
533
+ choices=["no", "fp32", "fp16", "bf16"],
534
+ help=(
535
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
536
+ " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
537
+ ),
538
+ )
539
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
540
+ parser.add_argument(
541
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
542
+ )
543
+ parser.add_argument(
544
+ "--rank",
545
+ type=int,
546
+ default=4,
547
+ help=("The dimension of the LoRA update matrices."),
548
+ )
549
+
550
+ if input_args is not None:
551
+ args = parser.parse_args(input_args)
552
+ else:
553
+ args = parser.parse_args()
554
+
555
+ if args.dataset_name is None and args.instance_data_dir is None:
556
+ raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")
557
+
558
+ if args.dataset_name is not None and args.instance_data_dir is not None:
559
+ raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")
560
+
561
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
562
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
563
+ args.local_rank = env_local_rank
564
+
565
+ if args.with_prior_preservation:
566
+ if args.class_data_dir is None:
567
+ raise ValueError("You must specify a data directory for class images.")
568
+ if args.class_prompt is None:
569
+ raise ValueError("You must specify prompt for class images.")
570
+ else:
571
+ # logger is not available yet
572
+ if args.class_data_dir is not None:
573
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
574
+ if args.class_prompt is not None:
575
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
576
+
577
+ return args
578
+
579
+
580
+ class DreamBoothDataset(Dataset):
581
+ """
582
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
583
+ It pre-processes the images.
584
+ """
585
+
586
+ def __init__(
587
+ self,
588
+ instance_data_root,
589
+ instance_prompt,
590
+ class_prompt,
591
+ class_data_root=None,
592
+ class_num=None,
593
+ size=1024,
594
+ repeats=1,
595
+ center_crop=False,
596
+ ):
597
+ self.size = size
598
+ self.center_crop = center_crop
599
+
600
+ self.instance_prompt = instance_prompt
601
+ self.custom_instance_prompts = None
602
+ self.class_prompt = class_prompt
603
+
604
+ # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
605
+ # we load the training data using load_dataset
606
+ if args.dataset_name is not None:
607
+ try:
608
+ from datasets import load_dataset
609
+ except ImportError:
610
+ raise ImportError(
611
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
612
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
613
+ "local folder containing images only, specify --instance_data_dir instead."
614
+ )
615
+ # Downloading and loading a dataset from the hub.
616
+ # See more about loading custom images at
617
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
618
+ dataset = load_dataset(
619
+ args.dataset_name,
620
+ args.dataset_config_name,
621
+ cache_dir=args.cache_dir,
622
+ )
623
+ # Preprocessing the datasets.
624
+ column_names = dataset["train"].column_names
625
+
626
+ # 6. Get the column names for input/target.
627
+ if args.image_column is None:
628
+ image_column = column_names[0]
629
+ logger.info(f"image column defaulting to {image_column}")
630
+ else:
631
+ image_column = args.image_column
632
+ if image_column not in column_names:
633
+ raise ValueError(
634
+ f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
635
+ )
636
+ instance_images = dataset["train"][image_column]
637
+
638
+ if args.caption_column is None:
639
+ logger.info(
640
+ "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
641
+ "contains captions/prompts for the images, make sure to specify the "
642
+ "column as --caption_column"
643
+ )
644
+ self.custom_instance_prompts = None
645
+ else:
646
+ if args.caption_column not in column_names:
647
+ raise ValueError(
648
+ f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
649
+ )
650
+ custom_instance_prompts = dataset["train"][args.caption_column]
651
+ # create final list of captions according to --repeats
652
+ self.custom_instance_prompts = []
653
+ for caption in custom_instance_prompts:
654
+ self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
655
+ else:
656
+ self.instance_data_root = Path(instance_data_root)
657
+ if not self.instance_data_root.exists():
658
+ raise ValueError("Instance images root doesn't exists.")
659
+
660
+ instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
661
+ self.custom_instance_prompts = None
662
+
663
+ self.instance_images = []
664
+ for img in instance_images:
665
+ self.instance_images.extend(itertools.repeat(img, repeats))
666
+ self.num_instance_images = len(self.instance_images)
667
+ self._length = self.num_instance_images
668
+
669
+ if class_data_root is not None:
670
+ self.class_data_root = Path(class_data_root)
671
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
672
+ self.class_images_path = list(self.class_data_root.iterdir())
673
+ if class_num is not None:
674
+ self.num_class_images = min(len(self.class_images_path), class_num)
675
+ else:
676
+ self.num_class_images = len(self.class_images_path)
677
+ self._length = max(self.num_class_images, self.num_instance_images)
678
+ else:
679
+ self.class_data_root = None
680
+
681
+ self.image_transforms = transforms.Compose(
682
+ [
683
+ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
684
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
685
+ transforms.ToTensor(),
686
+ transforms.Normalize([0.5], [0.5]),
687
+ ]
688
+ )
689
+
690
+ def __len__(self):
691
+ return self._length
692
+
693
+ def __getitem__(self, index):
694
+ example = {}
695
+ instance_image = self.instance_images[index % self.num_instance_images]
696
+ instance_image = exif_transpose(instance_image)
697
+
698
+ if not instance_image.mode == "RGB":
699
+ instance_image = instance_image.convert("RGB")
700
+ example["instance_images"] = self.image_transforms(instance_image)
701
+
702
+ if self.custom_instance_prompts:
703
+ caption = self.custom_instance_prompts[index % self.num_instance_images]
704
+ if caption:
705
+ example["instance_prompt"] = caption
706
+ else:
707
+ example["instance_prompt"] = self.instance_prompt
708
+
709
+ else: # custom prompts were provided, but their length does not match the size of the image dataset
710
+ example["instance_prompt"] = self.instance_prompt
711
+
712
+ if self.class_data_root:
713
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
714
+ class_image = exif_transpose(class_image)
715
+
716
+ if not class_image.mode == "RGB":
717
+ class_image = class_image.convert("RGB")
718
+ example["class_images"] = self.image_transforms(class_image)
719
+ example["class_prompt"] = self.class_prompt
720
+
721
+ return example
722
+
723
+
724
+ def collate_fn(examples, with_prior_preservation=False):
725
+ pixel_values = [example["instance_images"] for example in examples]
726
+ prompts = [example["instance_prompt"] for example in examples]
727
+
728
+ # Concat class and instance examples for prior preservation.
729
+ # We do this to avoid doing two forward passes.
730
+ if with_prior_preservation:
731
+ pixel_values += [example["class_images"] for example in examples]
732
+ prompts += [example["class_prompt"] for example in examples]
733
+
734
+ pixel_values = torch.stack(pixel_values)
735
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
736
+
737
+ batch = {"pixel_values": pixel_values, "prompts": prompts}
738
+ return batch
739
+
740
+
741
+ class PromptDataset(Dataset):
742
+ "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
743
+
744
+ def __init__(self, prompt, num_samples):
745
+ self.prompt = prompt
746
+ self.num_samples = num_samples
747
+
748
+ def __len__(self):
749
+ return self.num_samples
750
+
751
+ def __getitem__(self, index):
752
+ example = {}
753
+ example["prompt"] = self.prompt
754
+ example["index"] = index
755
+ return example
756
+
757
+
758
+ def tokenize_prompt(tokenizer, prompt):
759
+ text_inputs = tokenizer(
760
+ prompt,
761
+ padding="max_length",
762
+ max_length=tokenizer.model_max_length,
763
+ truncation=True,
764
+ return_tensors="pt",
765
+ )
766
+ text_input_ids = text_inputs.input_ids
767
+ return text_input_ids
768
+
769
+
770
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
771
+ def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
772
+ prompt_embeds_list = []
773
+
774
+ for i, text_encoder in enumerate(text_encoders):
775
+ if tokenizers is not None:
776
+ tokenizer = tokenizers[i]
777
+ text_input_ids = tokenize_prompt(tokenizer, prompt)
778
+ else:
779
+ assert text_input_ids_list is not None
780
+ text_input_ids = text_input_ids_list[i]
781
+
782
+ prompt_embeds = text_encoder(
783
+ text_input_ids.to(text_encoder.device),
784
+ output_hidden_states=True,
785
+ )
786
+
787
+ # We are only interested in the pooled output of the final (second) text encoder
788
+ pooled_prompt_embeds = prompt_embeds[0]
789
+ prompt_embeds = prompt_embeds.hidden_states[-2]
790
+ bs_embed, seq_len, _ = prompt_embeds.shape
791
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
792
+ prompt_embeds_list.append(prompt_embeds)
793
+
794
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
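+ # The SDXL UNet cross-attends over the two encoders' penultimate hidden states concatenated
+ # along the feature dimension (768 + 1280 = 2048 for the stock SDXL checkpoints), while only the
+ # second encoder's pooled projection is kept for the added conditioning.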
795
+ pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
796
+ return prompt_embeds, pooled_prompt_embeds
797
+
798
+
799
+ def main(args):
800
+ logging_dir = Path(args.output_dir, args.logging_dir)
801
+
802
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
803
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
804
+ accelerator = Accelerator(
805
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
806
+ mixed_precision=args.mixed_precision,
807
+ log_with=args.report_to,
808
+ project_config=accelerator_project_config,
809
+ kwargs_handlers=[kwargs],
810
+ )
811
+
812
+ if args.report_to == "wandb":
813
+ if not is_wandb_available():
814
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
815
+ import wandb
816
+
817
+ # Make one log on every process with the configuration for debugging.
818
+ logging.basicConfig(
819
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
820
+ datefmt="%m/%d/%Y %H:%M:%S",
821
+ level=logging.INFO,
822
+ )
823
+ logger.info(accelerator.state, main_process_only=False)
824
+ if accelerator.is_local_main_process:
825
+ transformers.utils.logging.set_verbosity_warning()
826
+ diffusers.utils.logging.set_verbosity_info()
827
+ else:
828
+ transformers.utils.logging.set_verbosity_error()
829
+ diffusers.utils.logging.set_verbosity_error()
830
+
831
+ # If passed along, set the training seed now.
832
+ if args.seed is not None:
833
+ set_seed(args.seed)
834
+
835
+ # Generate class images if prior preservation is enabled.
836
+ if args.with_prior_preservation:
837
+ class_images_dir = Path(args.class_data_dir)
838
+ if not class_images_dir.exists():
839
+ class_images_dir.mkdir(parents=True)
840
+ cur_class_images = len(list(class_images_dir.iterdir()))
841
+
842
+ if cur_class_images < args.num_class_images:
843
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
844
+ if args.prior_generation_precision == "fp32":
845
+ torch_dtype = torch.float32
846
+ elif args.prior_generation_precision == "fp16":
847
+ torch_dtype = torch.float16
848
+ elif args.prior_generation_precision == "bf16":
849
+ torch_dtype = torch.bfloat16
850
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
851
+ args.pretrained_model_name_or_path,
852
+ torch_dtype=torch_dtype,
853
+ revision=args.revision,
854
+ variant=args.variant,
855
+ )
856
+ pipeline.set_progress_bar_config(disable=True)
857
+
858
+ num_new_images = args.num_class_images - cur_class_images
859
+ logger.info(f"Number of class images to sample: {num_new_images}.")
860
+
861
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
862
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
863
+
864
+ sample_dataloader = accelerator.prepare(sample_dataloader)
865
+ pipeline.to(accelerator.device)
866
+
867
+ for example in tqdm(
868
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
869
+ ):
870
+ images = pipeline(example["prompt"]).images
871
+
872
+ for i, image in enumerate(images):
873
+ hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
874
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
875
+ image.save(image_filename)
876
+
877
+ del pipeline
878
+ if torch.cuda.is_available():
879
+ torch.cuda.empty_cache()
880
+
881
+ # Handle the repository creation
882
+ if accelerator.is_main_process:
883
+ if args.output_dir is not None:
884
+ os.makedirs(args.output_dir, exist_ok=True)
885
+
886
+ if args.push_to_hub:
887
+ repo_id = create_repo(
888
+ repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
889
+ ).repo_id
890
+
891
+ # Load the tokenizers
892
+ tokenizer_one = AutoTokenizer.from_pretrained(
893
+ args.pretrained_model_name_or_path,
894
+ subfolder="tokenizer",
895
+ revision=args.revision,
896
+ use_fast=False,
897
+ )
898
+ tokenizer_two = AutoTokenizer.from_pretrained(
899
+ args.pretrained_model_name_or_path,
900
+ subfolder="tokenizer_2",
901
+ revision=args.revision,
902
+ use_fast=False,
903
+ )
904
+
905
+ # import correct text encoder classes
906
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(
907
+ args.pretrained_model_name_or_path, args.revision
908
+ )
909
+ text_encoder_cls_two = import_model_class_from_model_name_or_path(
910
+ args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
911
+ )
912
+
913
+ # Load scheduler and models
914
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
915
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
916
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
917
+ )
918
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
919
+ args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
920
+ )
921
+ vae_path = (
922
+ args.pretrained_model_name_or_path
923
+ if args.pretrained_vae_model_name_or_path is None
924
+ else args.pretrained_vae_model_name_or_path
925
+ )
926
+ vae = AutoencoderKL.from_pretrained(
927
+ vae_path,
928
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
929
+ revision=args.revision,
930
+ variant=args.variant,
931
+ )
932
+ unet = UNet2DConditionModel.from_pretrained(
933
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
934
+ )
935
+
936
+ # We only train the additional adapter LoRA layers
937
+ vae.requires_grad_(False)
938
+ text_encoder_one.requires_grad_(False)
939
+ text_encoder_two.requires_grad_(False)
940
+ unet.requires_grad_(False)
941
+
942
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
943
+ # as these weights are only used for inference, keeping weights in full precision is not required.
944
+ weight_dtype = torch.float32
945
+ if accelerator.mixed_precision == "fp16":
946
+ weight_dtype = torch.float16
947
+ elif accelerator.mixed_precision == "bf16":
948
+ weight_dtype = torch.bfloat16
949
+
950
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
951
+ unet.to(accelerator.device, dtype=weight_dtype)
952
+
953
+ # The VAE is always in float32 to avoid NaN losses.
954
+ vae.to(accelerator.device, dtype=torch.float32)
955
+
956
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
957
+ text_encoder_two.to(accelerator.device, dtype=weight_dtype)
958
+
959
+ if args.enable_xformers_memory_efficient_attention:
960
+ if is_xformers_available():
961
+ import xformers
962
+
963
+ xformers_version = version.parse(xformers.__version__)
964
+ if xformers_version == version.parse("0.0.16"):
965
+ logger.warn(
966
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
967
+ "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
968
+ )
969
+ unet.enable_xformers_memory_efficient_attention()
970
+ else:
971
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
972
+
973
+ if args.gradient_checkpointing:
974
+ unet.enable_gradient_checkpointing()
975
+ if args.train_text_encoder:
976
+ text_encoder_one.gradient_checkpointing_enable()
977
+ text_encoder_two.gradient_checkpointing_enable()
978
+
979
+ # now we will add new LoRA weights to the attention layers
980
+ unet_lora_config = LoraConfig(
981
+ r=args.rank,
982
+ lora_alpha=args.rank,
983
+ init_lora_weights="gaussian",
984
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
985
+ )
986
+ unet.add_adapter(unet_lora_config)
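+ # With peft, each targeted projection W is augmented as W + (lora_alpha / r) * B @ A, where A and B
+ # are the rank-r update matrices; since lora_alpha == r above, the effective scaling factor is 1.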
987
+
988
+ # The text encoders come from 🤗 transformers, so we do not modify their weights directly.
989
+ # Instead, we attach LoRA adapters to their attention blocks.
990
+ if args.train_text_encoder:
991
+ text_lora_config = LoraConfig(
992
+ r=args.rank,
993
+ lora_alpha=args.rank,
994
+ init_lora_weights="gaussian",
995
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
996
+ )
997
+ text_encoder_one.add_adapter(text_lora_config)
998
+ text_encoder_two.add_adapter(text_lora_config)
999
+
1000
+ # Make sure the trainable params are in float32.
1001
+ if args.mixed_precision == "fp16":
1002
+ models = [unet]
1003
+ if args.train_text_encoder:
1004
+ models.extend([text_encoder_one, text_encoder_two])
1005
+ for model in models:
1006
+ for param in model.parameters():
1007
+ # only upcast trainable parameters (LoRA) into fp32
1008
+ if param.requires_grad:
1009
+ param.data = param.to(torch.float32)
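+ # Keeping the (small) trainable LoRA parameters in fp32 avoids fp16 underflow/overflow in their
+ # gradients and optimizer states, while the frozen base weights stay in half precision.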
1010
+
1011
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1012
+ def save_model_hook(models, weights, output_dir):
1013
+ if accelerator.is_main_process:
1014
+ # there are only two options here: either just the unet attention processor layers
1015
+ # or both the unet and text encoder attention layers
1016
+ unet_lora_layers_to_save = None
1017
+ text_encoder_one_lora_layers_to_save = None
1018
+ text_encoder_two_lora_layers_to_save = None
1019
+
1020
+ for model in models:
1021
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1022
+ unet_lora_layers_to_save = get_peft_model_state_dict(model)
1023
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1024
+ text_encoder_one_lora_layers_to_save = get_peft_model_state_dict(model)
1025
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
1026
+ text_encoder_two_lora_layers_to_save = get_peft_model_state_dict(model)
1027
+ else:
1028
+ raise ValueError(f"unexpected save model: {model.__class__}")
1029
+
1030
+ # make sure to pop weight so that corresponding model is not saved again
1031
+ weights.pop()
1032
+
1033
+ StableDiffusionXLPipeline.save_lora_weights(
1034
+ output_dir,
1035
+ unet_lora_layers=unet_lora_layers_to_save,
1036
+ text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
1037
+ text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
1038
+ )
1039
+
1040
+ def load_model_hook(models, input_dir):
1041
+ unet_ = None
1042
+ text_encoder_one_ = None
1043
+ text_encoder_two_ = None
1044
+
1045
+ while len(models) > 0:
1046
+ model = models.pop()
1047
+
1048
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1049
+ unet_ = model
1050
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1051
+ text_encoder_one_ = model
1052
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
1053
+ text_encoder_two_ = model
1054
+ else:
1055
+ raise ValueError(f"unexpected save model: {model.__class__}")
1056
+
1057
+ lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
1058
+ LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
1059
+
1060
+ text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k}
1061
+ LoraLoaderMixin.load_lora_into_text_encoder(
1062
+ text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
1063
+ )
1064
+
1065
+ text_encoder_2_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder_2." in k}
1066
+ LoraLoaderMixin.load_lora_into_text_encoder(
1067
+ text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_
1068
+ )
1069
+
1070
+ accelerator.register_save_state_pre_hook(save_model_hook)
1071
+ accelerator.register_load_state_pre_hook(load_model_hook)
1072
+
1073
+ # Enable TF32 for faster training on Ampere GPUs,
1074
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1075
+ if args.allow_tf32:
1076
+ torch.backends.cuda.matmul.allow_tf32 = True
1077
+
1078
+ if args.scale_lr:
1079
+ args.learning_rate = (
1080
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1081
+ )
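+ # Example: with learning_rate=1e-4, gradient_accumulation_steps=2, train_batch_size=4 and
+ # 2 processes, the scaled learning rate becomes 1e-4 * 2 * 4 * 2 = 1.6e-3.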
1082
+
1083
+ unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters()))
1084
+
1085
+ if args.train_text_encoder:
1086
+ text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
1087
+ text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
1088
+
1089
+ # Optimization parameters
1090
+ unet_lora_parameters_with_lr = {"params": unet_lora_parameters, "lr": args.learning_rate}
1091
+ if args.train_text_encoder:
1092
+ # different learning rate for text encoder and unet
1093
+ text_lora_parameters_one_with_lr = {
1094
+ "params": text_lora_parameters_one,
1095
+ "weight_decay": args.adam_weight_decay_text_encoder,
1096
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1097
+ }
1098
+ text_lora_parameters_two_with_lr = {
1099
+ "params": text_lora_parameters_two,
1100
+ "weight_decay": args.adam_weight_decay_text_encoder,
1101
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1102
+ }
1103
+ params_to_optimize = [
1104
+ unet_lora_parameters_with_lr,
1105
+ text_lora_parameters_one_with_lr,
1106
+ text_lora_parameters_two_with_lr,
1107
+ ]
1108
+ else:
1109
+ params_to_optimize = [unet_lora_parameters_with_lr]
1110
+
1111
+ # Optimizer creation
1112
+ if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
1113
+ logger.warn(
1114
+ f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
1115
+ "Defaulting to adamW"
1116
+ )
1117
+ args.optimizer = "adamw"
1118
+
1119
+ if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
1120
+ logger.warn(
1121
+ f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
1122
+ f"set to {args.optimizer.lower()}"
1123
+ )
1124
+
1125
+ if args.optimizer.lower() == "adamw":
1126
+ if args.use_8bit_adam:
1127
+ try:
1128
+ import bitsandbytes as bnb
1129
+ except ImportError:
1130
+ raise ImportError(
1131
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
1132
+ )
1133
+
1134
+ optimizer_class = bnb.optim.AdamW8bit
1135
+ else:
1136
+ optimizer_class = torch.optim.AdamW
1137
+
1138
+ optimizer = optimizer_class(
1139
+ params_to_optimize,
1140
+ betas=(args.adam_beta1, args.adam_beta2),
1141
+ weight_decay=args.adam_weight_decay,
1142
+ eps=args.adam_epsilon,
1143
+ )
1144
+
1145
+ if args.optimizer.lower() == "prodigy":
1146
+ try:
1147
+ import prodigyopt
1148
+ except ImportError:
1149
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
1150
+
1151
+ optimizer_class = prodigyopt.Prodigy
1152
+
1153
+ optimizer = optimizer_class(
1154
+ params_to_optimize,
1155
+ lr=args.learning_rate,
1156
+ betas=(args.adam_beta1, args.adam_beta2),
1157
+ weight_decay=args.adam_weight_decay,
1158
+ eps=args.adam_epsilon,
1159
+ decouple=args.prodigy_decouple,
1160
+ use_bias_correction=args.prodigy_use_bias_correction,
1161
+ safeguard_warmup=args.prodigy_safeguard_warmup,
1162
+ )
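+ # Note: Prodigy adapts the effective step size on its own, so `--learning_rate` acts as a
+ # multiplier here and is commonly left at 1.0 when this optimizer is selected.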
1163
+
1164
+ # Dataset and DataLoaders creation:
1165
+ train_dataset = DreamBoothDataset(
1166
+ instance_data_root=args.instance_data_dir,
1167
+ instance_prompt=args.instance_prompt,
1168
+ class_prompt=args.class_prompt,
1169
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1170
+ class_num=args.num_class_images,
1171
+ size=args.resolution,
1172
+ repeats=args.repeats,
1173
+ center_crop=args.center_crop,
1174
+ )
1175
+
1176
+ train_dataloader = torch.utils.data.DataLoader(
1177
+ train_dataset,
1178
+ batch_size=args.train_batch_size,
1179
+ shuffle=True,
1180
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1181
+ num_workers=args.dataloader_num_workers,
1182
+ )
1183
+
1184
+ # Computes additional embeddings/ids required by the SDXL UNet.
1185
+ # regular text embeddings (when `train_text_encoder` is not True)
1186
+ # pooled text embeddings
1187
+ # time ids
1188
+
1189
+ def compute_time_ids():
1190
+ # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
1191
+ original_size = (args.resolution, args.resolution)
1192
+ target_size = (args.resolution, args.resolution)
1193
+ crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
1194
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
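+ # With the defaults (resolution=1024, crop offsets 0) this is [1024, 1024, 0, 0, 1024, 1024], i.e.
+ # SDXL's micro-conditioning tuple (original_h, original_w, crop_top, crop_left, target_h, target_w).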
1195
+ add_time_ids = torch.tensor([add_time_ids])
1196
+ add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
1197
+ return add_time_ids
1198
+
1199
+ if not args.train_text_encoder:
1200
+ tokenizers = [tokenizer_one, tokenizer_two]
1201
+ text_encoders = [text_encoder_one, text_encoder_two]
1202
+
1203
+ def compute_text_embeddings(prompt, text_encoders, tokenizers):
1204
+ with torch.no_grad():
1205
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
1206
+ prompt_embeds = prompt_embeds.to(accelerator.device)
1207
+ pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
1208
+ return prompt_embeds, pooled_prompt_embeds
1209
+
1210
+ # Handle instance prompt.
1211
+ instance_time_ids = compute_time_ids()
1212
+
1213
+ # If no type of tuning is done on the text_encoder and custom instance prompts are NOT
1214
+ # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid
1215
+ # the redundant encoding.
1216
+ if not args.train_text_encoder and not train_dataset.custom_instance_prompts:
1217
+ instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings(
1218
+ args.instance_prompt, text_encoders, tokenizers
1219
+ )
1220
+
1221
+ # Handle class prompt for prior-preservation.
1222
+ if args.with_prior_preservation:
1223
+ class_time_ids = compute_time_ids()
1224
+ if not args.train_text_encoder:
1225
+ class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings(
1226
+ args.class_prompt, text_encoders, tokenizers
1227
+ )
1228
+
1229
+ # Clear the memory here
1230
+ if not args.train_text_encoder and not train_dataset.custom_instance_prompts:
1231
+ del tokenizers, text_encoders
1232
+ gc.collect()
1233
+ torch.cuda.empty_cache()
1234
+
1235
+ # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images),
1236
+ # pack the statically computed variables appropriately here. This is so that we don't
1237
+ # have to pass them to the dataloader.
1238
+ add_time_ids = instance_time_ids
1239
+ if args.with_prior_preservation:
1240
+ add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0)
1241
+
1242
+ if not train_dataset.custom_instance_prompts:
1243
+ if not args.train_text_encoder:
1244
+ prompt_embeds = instance_prompt_hidden_states
1245
+ unet_add_text_embeds = instance_pooled_prompt_embeds
1246
+ if args.with_prior_preservation:
1247
+ prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
1248
+ unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
1249
+ # if we're optimizing the text encoder (whether the instance prompt is used for all images or custom prompts are provided) we need to tokenize and encode the
1250
+ # batch prompts on all training steps
1251
+ else:
1252
+ tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt)
1253
+ tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt)
1254
+ if args.with_prior_preservation:
1255
+ class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt)
1256
+ class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt)
1257
+ tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
1258
+ tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
1259
+
1260
+ # Scheduler and math around the number of training steps.
1261
+ overrode_max_train_steps = False
1262
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1263
+ if args.max_train_steps is None:
1264
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1265
+ overrode_max_train_steps = True
1266
+
1267
+ lr_scheduler = get_scheduler(
1268
+ args.lr_scheduler,
1269
+ optimizer=optimizer,
1270
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1271
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1272
+ num_cycles=args.lr_num_cycles,
1273
+ power=args.lr_power,
1274
+ )
1275
+
1276
+ # Prepare everything with our `accelerator`.
1277
+ if args.train_text_encoder:
1278
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1279
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
1280
+ )
1281
+ else:
1282
+ unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1283
+ unet, optimizer, train_dataloader, lr_scheduler
1284
+ )
1285
+
1286
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1287
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1288
+ if overrode_max_train_steps:
1289
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1290
+ # Afterwards we recalculate our number of training epochs
1291
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1292
+
1293
+ # We need to initialize the trackers we use, and also store our configuration.
1294
+ # The trackers initialize automatically on the main process.
1295
+ if accelerator.is_main_process:
1296
+ accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args))
1297
+
1298
+ # Train!
1299
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1300
+
1301
+ logger.info("***** Running training *****")
1302
+ logger.info(f" Num examples = {len(train_dataset)}")
1303
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1304
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1305
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1306
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1307
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1308
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1309
+ global_step = 0
1310
+ first_epoch = 0
1311
+
1312
+ # Potentially load in the weights and states from a previous save
1313
+ if args.resume_from_checkpoint:
1314
+ if args.resume_from_checkpoint != "latest":
1315
+ path = os.path.basename(args.resume_from_checkpoint)
1316
+ else:
1317
+ # Get the most recent checkpoint
1318
+ dirs = os.listdir(args.output_dir)
1319
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1320
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1321
+ path = dirs[-1] if len(dirs) > 0 else None
1322
+
1323
+ if path is None:
1324
+ accelerator.print(
1325
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1326
+ )
1327
+ args.resume_from_checkpoint = None
1328
+ initial_global_step = 0
1329
+ else:
1330
+ accelerator.print(f"Resuming from checkpoint {path}")
1331
+ accelerator.load_state(os.path.join(args.output_dir, path))
1332
+ global_step = int(path.split("-")[1])
1333
+
1334
+ initial_global_step = global_step
1335
+ first_epoch = global_step // num_update_steps_per_epoch
1336
+
1337
+ else:
1338
+ initial_global_step = 0
1339
+
1340
+ progress_bar = tqdm(
1341
+ range(0, args.max_train_steps),
1342
+ initial=initial_global_step,
1343
+ desc="Steps",
1344
+ # Only show the progress bar once on each machine.
1345
+ disable=not accelerator.is_local_main_process,
1346
+ )
1347
+
1348
+ for epoch in range(first_epoch, args.num_train_epochs):
1349
+ unet.train()
1350
+ if args.train_text_encoder:
1351
+ text_encoder_one.train()
1352
+ text_encoder_two.train()
1353
+
1354
+ # set requires_grad = True on the top-level embedding parameters so gradient checkpointing works
1355
+ text_encoder_one.text_model.embeddings.requires_grad_(True)
1356
+ text_encoder_two.text_model.embeddings.requires_grad_(True)
1357
+
1358
+ for step, batch in enumerate(train_dataloader):
1359
+ with accelerator.accumulate(unet):
1360
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
1361
+ prompts = batch["prompts"]
1362
+
1363
+ # encode batch prompts when custom prompts are provided for each image
1364
+ if train_dataset.custom_instance_prompts:
1365
+ if not args.train_text_encoder:
1366
+ prompt_embeds, unet_add_text_embeds = compute_text_embeddings(
1367
+ prompts, text_encoders, tokenizers
1368
+ )
1369
+ else:
1370
+ tokens_one = tokenize_prompt(tokenizer_one, prompts)
1371
+ tokens_two = tokenize_prompt(tokenizer_two, prompts)
1372
+
1373
+ # Convert images to latent space
1374
+ model_input = vae.encode(pixel_values).latent_dist.sample()
1375
+ model_input = model_input * vae.config.scaling_factor
1376
+ if args.pretrained_vae_model_name_or_path is None:
1377
+ model_input = model_input.to(weight_dtype)
1378
+
1379
+ # Sample noise that we'll add to the latents
1380
+ noise = torch.randn_like(model_input)
1381
+ bsz = model_input.shape[0]
1382
+ # Sample a random timestep for each image
1383
+ timesteps = torch.randint(
1384
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1385
+ )
1386
+ timesteps = timesteps.long()
1387
+
1388
+ # Add noise to the model input according to the noise magnitude at each timestep
1389
+ # (this is the forward diffusion process)
1390
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1391
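+                # For the default DDPM-style scheduler this computes
+                #     noisy = sqrt(alpha_bar_t) * model_input + sqrt(1 - alpha_bar_t) * noise,
+                # i.e. the noise level grows with the sampled timestep t.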
+
+                # Calculate the elements to repeat depending on the use of prior-preservation and custom captions.
+                if not train_dataset.custom_instance_prompts:
+                    elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz
+                    elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
+                else:
+                    elems_to_repeat_text_embeds = 1
+                    elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
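+                # With prior preservation, each batch holds instance images in the first half and
+                # class images in the second half, so shared embeddings are repeated bsz // 2 times.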
+
+                # Predict the noise residual
+                if not args.train_text_encoder:
+                    unet_added_conditions = {
+                        "time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1),
+                        "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat_text_embeds, 1),
+                    }
+                    prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
+                    model_pred = unet(
+                        noisy_model_input,
+                        timesteps,
+                        prompt_embeds_input,
+                        added_cond_kwargs=unet_added_conditions,
+                    ).sample
+                else:
+                    unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1)}
+                    prompt_embeds, pooled_prompt_embeds = encode_prompt(
+                        text_encoders=[text_encoder_one, text_encoder_two],
+                        tokenizers=None,
+                        prompt=None,
+                        text_input_ids_list=[tokens_one, tokens_two],
+                    )
+                    unet_added_conditions.update(
+                        {"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat_text_embeds, 1)}
+                    )
+                    prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
+                    model_pred = unet(
+                        noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions
+                    ).sample
+
+                # Get the target for loss depending on the prediction type
+                if noise_scheduler.config.prediction_type == "epsilon":
+                    target = noise
+                elif noise_scheduler.config.prediction_type == "v_prediction":
+                    target = noise_scheduler.get_velocity(model_input, noise, timesteps)
+                else:
+                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
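+                # get_velocity returns v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * model_input,
+                # the target used by v-prediction models.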
+
+                if args.with_prior_preservation:
+                    # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
+                    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
+                    target, target_prior = torch.chunk(target, 2, dim=0)
+
+                    # Compute prior loss
+                    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
+
+                if args.snr_gamma is None:
+                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
+                else:
+                    # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
+                    # Since we predict the noise instead of x_0, the original formulation is slightly changed.
+                    # This is discussed in Section 4.2 of the same paper.
+                    snr = compute_snr(noise_scheduler, timesteps)
+                    base_weight = (
+                        torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(dim=1)[0] / snr
+                    )
+
+                    if noise_scheduler.config.prediction_type == "v_prediction":
+                        # Velocity objective needs to be floored to an SNR weight of one.
+                        mse_loss_weights = base_weight + 1
+                    else:
+                        # Epsilon and sample both use the same loss weights.
+                        mse_loss_weights = base_weight
+
+                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
+                    loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
+                    loss = loss.mean()
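+                    # Net effect: the per-sample MSE is scaled by min(SNR(t), snr_gamma) / SNR(t)
+                    # (plus 1 for v-prediction), which down-weights easy, low-noise (high-SNR) timesteps.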
+
+                if args.with_prior_preservation:
+                    # Add the prior loss to the instance loss.
+                    loss = loss + args.prior_loss_weight * prior_loss
+
+                accelerator.backward(loss)
+                if accelerator.sync_gradients:
+                    params_to_clip = (
+                        itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
+                        if args.train_text_encoder
+                        else unet_lora_parameters
+                    )
+                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad()
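+                # Note: with Accelerate's gradient accumulation, gradients are only synced (and the
+                # clipping branch above taken) on the last micro-batch of each accumulation window.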
+
+            # Checks if the accelerator has performed an optimization step behind the scenes
+            if accelerator.sync_gradients:
+                progress_bar.update(1)
+                global_step += 1
+
+                if accelerator.is_main_process:
+                    if global_step % args.checkpointing_steps == 0:
+                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
+                        if args.checkpoints_total_limit is not None:
+                            checkpoints = os.listdir(args.output_dir)
+                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
+                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
+
+                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
+                            if len(checkpoints) >= args.checkpoints_total_limit:
+                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
+                                removing_checkpoints = checkpoints[0:num_to_remove]
+
+                                logger.info(
+                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
+                                )
+                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
+
+                                for removing_checkpoint in removing_checkpoints:
+                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
+                                    shutil.rmtree(removing_checkpoint)
+
+                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
+                        accelerator.save_state(save_path)
+                        logger.info(f"Saved state to {save_path}")
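+                        # `save_state` captures the trained weights together with optimizer, LR-scheduler
+                        # and RNG state, which is what `--resume_from_checkpoint` reloads above.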
+
+            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
+            progress_bar.set_postfix(**logs)
+            accelerator.log(logs, step=global_step)
+
+            if global_step >= args.max_train_steps:
+                break
+
+        if accelerator.is_main_process:
+            if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
+                logger.info(
+                    f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
+                    f" {args.validation_prompt}."
+                )
+                # create pipeline
+                if not args.train_text_encoder:
+                    text_encoder_one = text_encoder_cls_one.from_pretrained(
+                        args.pretrained_model_name_or_path,
+                        subfolder="text_encoder",
+                        revision=args.revision,
+                        variant=args.variant,
+                    )
+                    text_encoder_two = text_encoder_cls_two.from_pretrained(
+                        args.pretrained_model_name_or_path,
+                        subfolder="text_encoder_2",
+                        revision=args.revision,
+                        variant=args.variant,
+                    )
+                pipeline = StableDiffusionXLPipeline.from_pretrained(
+                    args.pretrained_model_name_or_path,
+                    vae=vae,
+                    text_encoder=accelerator.unwrap_model(text_encoder_one),
+                    text_encoder_2=accelerator.unwrap_model(text_encoder_two),
+                    unet=accelerator.unwrap_model(unet),
+                    revision=args.revision,
+                    variant=args.variant,
+                    torch_dtype=weight_dtype,
+                )
+
+                # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
+                scheduler_args = {}
+
+                if "variance_type" in pipeline.scheduler.config:
+                    variance_type = pipeline.scheduler.config.variance_type
+
+                    if variance_type in ["learned", "learned_range"]:
+                        variance_type = "fixed_small"
+
+                    scheduler_args["variance_type"] = variance_type
+
+                pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
+                    pipeline.scheduler.config, **scheduler_args
+                )
+
+                pipeline = pipeline.to(accelerator.device)
+                pipeline.set_progress_bar_config(disable=True)
+
+                # run inference
+                generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
+                pipeline_args = {"prompt": args.validation_prompt}
+
+                with torch.cuda.amp.autocast():
+                    images = [
+                        pipeline(**pipeline_args, generator=generator).images[0]
+                        for _ in range(args.num_validation_images)
+                    ]
+
+                for tracker in accelerator.trackers:
+                    if tracker.name == "tensorboard":
+                        np_images = np.stack([np.asarray(img) for img in images])
+                        tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
+                    if tracker.name == "wandb":
+                        tracker.log(
+                            {
+                                "validation": [
+                                    wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
+                                    for i, image in enumerate(images)
+                                ]
+                            }
+                        )
+
+                del pipeline
+                torch.cuda.empty_cache()
+
+    # Save the lora layers
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        unet = accelerator.unwrap_model(unet)
+        unet = unet.to(torch.float32)
+        unet_lora_layers = get_peft_model_state_dict(unet)
+
+        if args.train_text_encoder:
+            text_encoder_one = accelerator.unwrap_model(text_encoder_one)
+            text_encoder_lora_layers = get_peft_model_state_dict(text_encoder_one.to(torch.float32))
+            text_encoder_two = accelerator.unwrap_model(text_encoder_two)
+            text_encoder_2_lora_layers = get_peft_model_state_dict(text_encoder_two.to(torch.float32))
+        else:
+            text_encoder_lora_layers = None
+            text_encoder_2_lora_layers = None
+
+        StableDiffusionXLPipeline.save_lora_weights(
+            save_directory=args.output_dir,
+            unet_lora_layers=unet_lora_layers,
+            text_encoder_lora_layers=text_encoder_lora_layers,
+            text_encoder_2_lora_layers=text_encoder_2_lora_layers,
+        )
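+        # By default this writes `pytorch_lora_weights.safetensors` into `args.output_dir`,
+        # which is the file `load_lora_weights` picks up again below.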
+
+        # Final inference
+        # Load previous pipeline
+        vae = AutoencoderKL.from_pretrained(
+            vae_path,
+            subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
+            revision=args.revision,
+            variant=args.variant,
+            torch_dtype=weight_dtype,
+        )
+        pipeline = StableDiffusionXLPipeline.from_pretrained(
+            args.pretrained_model_name_or_path,
+            vae=vae,
+            revision=args.revision,
+            variant=args.variant,
+            torch_dtype=weight_dtype,
+        )
+
+        # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
+        scheduler_args = {}
+
+        if "variance_type" in pipeline.scheduler.config:
+            variance_type = pipeline.scheduler.config.variance_type
+
+            if variance_type in ["learned", "learned_range"]:
+                variance_type = "fixed_small"
+
+            scheduler_args["variance_type"] = variance_type
+
+        pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
+
+        # load attention processors
+        pipeline.load_lora_weights(args.output_dir)
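+        # The saved adapter can also be reused later in a fresh session, e.g.:
+        #     pipe = StableDiffusionXLPipeline.from_pretrained(args.pretrained_model_name_or_path)
+        #     pipe.load_lora_weights(args.output_dir)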
+
+        # run inference
+        images = []
+        if args.validation_prompt and args.num_validation_images > 0:
+            pipeline = pipeline.to(accelerator.device)
+            generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
+            images = [
+                pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
+                for _ in range(args.num_validation_images)
+            ]
+
+            for tracker in accelerator.trackers:
+                if tracker.name == "tensorboard":
+                    np_images = np.stack([np.asarray(img) for img in images])
+                    tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
+                if tracker.name == "wandb":
+                    tracker.log(
+                        {
+                            "test": [
+                                wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
+                                for i, image in enumerate(images)
+                            ]
+                        }
+                    )
+
+        if args.push_to_hub:
+            save_model_card(
+                repo_id,
+                images=images,
+                base_model=args.pretrained_model_name_or_path,
+                train_text_encoder=args.train_text_encoder,
+                instance_prompt=args.instance_prompt,
+                validation_prompt=args.validation_prompt,
+                repo_folder=args.output_dir,
+                vae_path=args.pretrained_vae_model_name_or_path,
+            )
+            upload_folder(
+                repo_id=repo_id,
+                folder_path=args.output_dir,
+                commit_message="End of training",
+                ignore_patterns=["step_*", "epoch_*"],
+            )
+
+    accelerator.end_training()
+
+
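+# Example launch (a sketch; flag names assumed from `parse_args` above, adjust paths/hyperparameters):
+#   accelerate launch train_with_fixes.py \
+#     --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \
+#     --instance_data_dir=dog --instance_prompt="a photo of sks dog" \
+#     --output_dir=lora-trained-xl \
+#     --train_batch_size=1 --gradient_accumulation_steps=4 \
+#     --max_train_steps=500 --checkpointing_steps=100 \
+#     --validation_prompt="a photo of sks dog in a bucket" --seed=0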
+if __name__ == "__main__":
+    args = parse_args()
+    main(args)