DmitrMakeev committed on
Commit
6725ae8
1 Parent(s): 3a7a9d8

Create train_dreambooth_lora_sdxl.py

Files changed (1)
  1. train_dreambooth_lora_sdxl.py +1403 -0
train_dreambooth_lora_sdxl.py ADDED
@@ -0,0 +1,1403 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import gc
18
+ import hashlib
19
+ import itertools
20
+ import logging
21
+ import math
22
+ import os
23
+ import shutil
24
+ import warnings
25
+ from pathlib import Path
26
+ from typing import Dict
27
+
28
+ import numpy as np
29
+ import torch
30
+ import torch.nn.functional as F
31
+ import torch.utils.checkpoint
32
+ import transformers
33
+ from accelerate import Accelerator
34
+ from accelerate.logging import get_logger
35
+ from accelerate.utils import ProjectConfiguration, set_seed
36
+ from huggingface_hub import create_repo, upload_folder
37
+ from packaging import version
38
+ from PIL import Image
39
+ from PIL.ImageOps import exif_transpose
40
+ from torch.utils.data import Dataset
41
+ from torchvision import transforms
42
+ from tqdm.auto import tqdm
43
+ from transformers import AutoTokenizer, PretrainedConfig
44
+
45
+ import diffusers
46
+ from diffusers import (
47
+ AutoencoderKL,
48
+ DDPMScheduler,
49
+ DPMSolverMultistepScheduler,
50
+ StableDiffusionXLPipeline,
51
+ UNet2DConditionModel,
52
+ )
53
+ from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
54
+ from diffusers.models.attention_processor import LoRAAttnProcessor, LoRAAttnProcessor2_0
55
+ from diffusers.optimization import get_scheduler
56
+ from diffusers.utils import check_min_version, is_wandb_available
57
+ from diffusers.utils.import_utils import is_xformers_available
58
+
59
+
60
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
61
+ check_min_version("0.21.0.dev0")
62
+
63
+ logger = get_logger(__name__)
64
+
65
+
66
+ def save_model_card(
67
+ repo_id: str, images=None, dataset_id=None, base_model=None, train_text_encoder=False, prompt=None, repo_folder=None, vae_path=None
68
+ ):
69
+ img_str = ""
70
+ for i, image in enumerate(images):
71
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
72
+ img_str += f"![img_{i}](./image_{i}.png)\n"
73
+
74
+ yaml = f"""
75
+ ---
76
+ base_model: {base_model}
77
+ instance_prompt: {prompt}
78
+ tags:
79
+ - stable-diffusion-xl
80
+ - stable-diffusion-xl-diffusers
81
+ - text-to-image
82
+ - diffusers
83
+ - lora
84
+ inference: false
85
+ datasets:
86
+ - {dataset_id}
87
+ ---
88
+ """
89
+ model_card = f"""
90
+ # LoRA DreamBooth - {repo_id}
91
+ These are LoRA adaptation weights for {base_model}.
92
+ The weights were trained on the concept prompt:
93
+ `{prompt}`
94
+ Use this keyword to trigger your custom model in your prompts.
95
+ LoRA for the text encoder was enabled: {train_text_encoder}.
96
+ Special VAE used for training: {vae_path}.
97
+ ## Usage
98
+ Make sure to upgrade diffusers to >= 0.19.0:
99
+ ```
100
+ pip install diffusers --upgrade
101
+ ```
102
+ In addition, make sure to install transformers, safetensors, and accelerate, as well as the invisible watermark:
103
+ ```
104
+ pip install invisible_watermark transformers accelerate safetensors
105
+ ```
106
+ To just use the base model, you can run:
107
+ ```python
108
+ import torch
109
+ from diffusers import DiffusionPipeline, AutoencoderKL
110
+ vae = AutoencoderKL.from_pretrained('{vae_path}', torch_dtype=torch.float16)
111
+ pipe = DiffusionPipeline.from_pretrained(
112
+ "stabilityai/stable-diffusion-xl-base-1.0",
113
+ vae=vae, torch_dtype=torch.float16, variant="fp16",
114
+ use_safetensors=True
115
+ )
116
+ # This is where you load your trained weights
117
+ pipe.load_lora_weights('{repo_id}')
118
+ pipe.to("cuda")
119
+ prompt = "A majestic {prompt} jumping from a big stone at night"
120
+ image = pipe(prompt=prompt, num_inference_steps=50).images[0]
121
+ ```
122
+ """
123
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
124
+ f.write(yaml + model_card)
125
+
126
+
127
+ def import_model_class_from_model_name_or_path(
128
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
129
+ ):
130
+ text_encoder_config = PretrainedConfig.from_pretrained(
131
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
132
+ )
133
+ model_class = text_encoder_config.architectures[0]
134
+
135
+ if model_class == "CLIPTextModel":
136
+ from transformers import CLIPTextModel
137
+
138
+ return CLIPTextModel
139
+ elif model_class == "CLIPTextModelWithProjection":
140
+ from transformers import CLIPTextModelWithProjection
141
+
142
+ return CLIPTextModelWithProjection
143
+ else:
144
+ raise ValueError(f"{model_class} is not supported.")
145
+
146
+
147
+ def parse_args(input_args=None):
148
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
149
+ parser.add_argument(
150
+ "--pretrained_model_name_or_path",
151
+ type=str,
152
+ default=None,
153
+ required=True,
154
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
155
+ )
156
+ parser.add_argument(
157
+ "--pretrained_vae_model_name_or_path",
158
+ type=str,
159
+ default=None,
160
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
161
+ )
162
+ parser.add_argument(
163
+ "--revision",
164
+ type=str,
165
+ default=None,
166
+ required=False,
167
+ help="Revision of pretrained model identifier from huggingface.co/models.",
168
+ )
169
+ parser.add_argument(
170
+ "--dataset_id",
171
+ type=str,
172
+ default=None,
173
+ required=True,
174
+ help="The ID of the dataset the training images come from; it is referenced in the generated model card.",
175
+ )
176
+ parser.add_argument(
177
+ "--instance_data_dir",
178
+ type=str,
179
+ default=None,
180
+ required=True,
181
+ help="A folder containing the training data of instance images.",
182
+ )
183
+ parser.add_argument(
184
+ "--class_data_dir",
185
+ type=str,
186
+ default=None,
187
+ required=False,
188
+ help="A folder containing the training data of class images.",
189
+ )
190
+ parser.add_argument(
191
+ "--instance_prompt",
192
+ type=str,
193
+ default=None,
194
+ required=True,
195
+ help="The prompt with identifier specifying the instance",
196
+ )
197
+ parser.add_argument(
198
+ "--class_prompt",
199
+ type=str,
200
+ default=None,
201
+ help="The prompt to specify images in the same class as provided instance images.",
202
+ )
203
+ parser.add_argument(
204
+ "--validation_prompt",
205
+ type=str,
206
+ default=None,
207
+ help="A prompt that is used during validation to verify that the model is learning.",
208
+ )
209
+ parser.add_argument(
210
+ "--num_validation_images",
211
+ type=int,
212
+ default=4,
213
+ help="Number of images that should be generated during validation with `validation_prompt`.",
214
+ )
215
+ parser.add_argument(
216
+ "--validation_epochs",
217
+ type=int,
218
+ default=50,
219
+ help=(
220
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
221
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
222
+ ),
223
+ )
224
+ parser.add_argument(
225
+ "--with_prior_preservation",
226
+ default=False,
227
+ action="store_true",
228
+ help="Flag to add prior preservation loss.",
229
+ )
230
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
231
+ parser.add_argument(
232
+ "--num_class_images",
233
+ type=int,
234
+ default=100,
235
+ help=(
236
+ "Minimal number of class images for prior preservation loss. If there are not enough images already present in"
237
+ " class_data_dir, additional images will be sampled with class_prompt."
238
+ ),
239
+ )
240
+ parser.add_argument(
241
+ "--output_dir",
242
+ type=str,
243
+ default="lora-dreambooth-model",
244
+ help="The output directory where the model predictions and checkpoints will be written.",
245
+ )
246
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
247
+ parser.add_argument(
248
+ "--resolution",
249
+ type=int,
250
+ default=1024,
251
+ help=(
252
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
253
+ " resolution"
254
+ ),
255
+ )
256
+ parser.add_argument(
257
+ "--crops_coords_top_left_h",
258
+ type=int,
259
+ default=0,
260
+ help=("Height coordinate of the top-left crop corner to be included in the crop coordinate embeddings needed by the SDXL UNet."),
261
+ )
262
+ parser.add_argument(
263
+ "--crops_coords_top_left_w",
264
+ type=int,
265
+ default=0,
266
+ help=("Width coordinate of the top-left crop corner to be included in the crop coordinate embeddings needed by the SDXL UNet."),
267
+ )
268
+ parser.add_argument(
269
+ "--center_crop",
270
+ default=False,
271
+ action="store_true",
272
+ help=(
273
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
274
+ " cropped. The images will be resized to the resolution first before cropping."
275
+ ),
276
+ )
277
+ parser.add_argument(
278
+ "--train_text_encoder",
279
+ action="store_true",
280
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
281
+ )
282
+ parser.add_argument(
283
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
284
+ )
285
+ parser.add_argument(
286
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
287
+ )
288
+ parser.add_argument("--num_train_epochs", type=int, default=1)
289
+ parser.add_argument(
290
+ "--max_train_steps",
291
+ type=int,
292
+ default=None,
293
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
294
+ )
295
+ parser.add_argument(
296
+ "--checkpointing_steps",
297
+ type=int,
298
+ default=500,
299
+ help=(
300
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
301
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
302
+ " training using `--resume_from_checkpoint`."
303
+ ),
304
+ )
305
+ parser.add_argument(
306
+ "--checkpoints_total_limit",
307
+ type=int,
308
+ default=None,
309
+ help=("Max number of checkpoints to store."),
310
+ )
311
+ parser.add_argument(
312
+ "--resume_from_checkpoint",
313
+ type=str,
314
+ default=None,
315
+ help=(
316
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
317
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
318
+ ),
319
+ )
320
+ parser.add_argument(
321
+ "--gradient_accumulation_steps",
322
+ type=int,
323
+ default=1,
324
+ help="Number of update steps to accumulate before performing a backward/update pass.",
325
+ )
326
+ parser.add_argument(
327
+ "--gradient_checkpointing",
328
+ action="store_true",
329
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
330
+ )
331
+ parser.add_argument(
332
+ "--learning_rate",
333
+ type=float,
334
+ default=5e-4,
335
+ help="Initial learning rate (after the potential warmup period) to use.",
336
+ )
337
+ parser.add_argument(
338
+ "--scale_lr",
339
+ action="store_true",
340
+ default=False,
341
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
342
+ )
343
+ parser.add_argument(
344
+ "--lr_scheduler",
345
+ type=str,
346
+ default="constant",
347
+ help=(
348
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
349
+ ' "constant", "constant_with_warmup"]'
350
+ ),
351
+ )
352
+ parser.add_argument(
353
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
354
+ )
355
+ parser.add_argument(
356
+ "--lr_num_cycles",
357
+ type=int,
358
+ default=1,
359
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
360
+ )
361
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
362
+ parser.add_argument(
363
+ "--dataloader_num_workers",
364
+ type=int,
365
+ default=0,
366
+ help=(
367
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
368
+ ),
369
+ )
370
+ parser.add_argument(
371
+ "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
372
+ )
373
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
374
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
375
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
376
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
377
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
378
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
379
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
380
+ parser.add_argument(
381
+ "--hub_model_id",
382
+ type=str,
383
+ default=None,
384
+ help="The name of the repository to keep in sync with the local `output_dir`.",
385
+ )
386
+ parser.add_argument(
387
+ "--logging_dir",
388
+ type=str,
389
+ default="logs",
390
+ help=(
391
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
392
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
393
+ ),
394
+ )
395
+ parser.add_argument(
396
+ "--allow_tf32",
397
+ action="store_true",
398
+ help=(
399
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
400
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
401
+ ),
402
+ )
403
+ parser.add_argument(
404
+ "--report_to",
405
+ type=str,
406
+ default="tensorboard",
407
+ help=(
408
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
409
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
410
+ ),
411
+ )
412
+ parser.add_argument(
413
+ "--mixed_precision",
414
+ type=str,
415
+ default=None,
416
+ choices=["no", "fp16", "bf16"],
417
+ help=(
418
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
419
+ " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
420
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
421
+ ),
422
+ )
423
+ parser.add_argument(
424
+ "--prior_generation_precision",
425
+ type=str,
426
+ default=None,
427
+ choices=["no", "fp32", "fp16", "bf16"],
428
+ help=(
429
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
430
+ " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
431
+ ),
432
+ )
433
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
434
+ parser.add_argument(
435
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
436
+ )
437
+ parser.add_argument(
438
+ "--rank",
439
+ type=int,
440
+ default=4,
441
+ help=("The dimension of the LoRA update matrices."),
442
+ )
443
+
444
+ if input_args is not None:
445
+ args = parser.parse_args(input_args)
446
+ else:
447
+ args = parser.parse_args()
448
+
449
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
450
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
451
+ args.local_rank = env_local_rank
452
+
453
+ if args.with_prior_preservation:
454
+ if args.class_data_dir is None:
455
+ raise ValueError("You must specify a data directory for class images.")
456
+ if args.class_prompt is None:
457
+ raise ValueError("You must specify prompt for class images.")
458
+ else:
459
+ # logger is not available yet
460
+ if args.class_data_dir is not None:
461
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
462
+ if args.class_prompt is not None:
463
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
464
+
465
+ return args
466
+
467
+
468
+ class DreamBoothDataset(Dataset):
469
+ """
470
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
471
+ It pre-processes the images.
472
+ """
473
+
474
+ def __init__(
475
+ self,
476
+ instance_data_root,
477
+ class_data_root=None,
478
+ class_num=None,
479
+ size=1024,
480
+ center_crop=False,
481
+ ):
482
+ self.size = size
483
+ self.center_crop = center_crop
484
+
485
+ self.instance_data_root = Path(instance_data_root)
486
+ if not self.instance_data_root.exists():
487
+ raise ValueError("Instance images root doesn't exist.")
488
+
489
+ self.instance_images_path = list(Path(instance_data_root).iterdir())
490
+ self.num_instance_images = len(self.instance_images_path)
491
+ self._length = self.num_instance_images
492
+
493
+ if class_data_root is not None:
494
+ self.class_data_root = Path(class_data_root)
495
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
496
+ self.class_images_path = list(self.class_data_root.iterdir())
497
+ if class_num is not None:
498
+ self.num_class_images = min(len(self.class_images_path), class_num)
499
+ else:
500
+ self.num_class_images = len(self.class_images_path)
501
+ self._length = max(self.num_class_images, self.num_instance_images)
502
+ else:
503
+ self.class_data_root = None
504
+
505
+ self.image_transforms = transforms.Compose(
506
+ [
507
+ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
508
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
509
+ transforms.ToTensor(),
510
+ transforms.Normalize([0.5], [0.5]),
511
+ ]
512
+ )
513
+
514
+ def __len__(self):
515
+ return self._length
516
+
517
+ def __getitem__(self, index):
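+ # Indices are wrapped with modulo below, so __len__ can be max(num_instance_images, num_class_images) and both image sets are cycled through.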
518
+ example = {}
519
+ instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
520
+ instance_image = exif_transpose(instance_image)
521
+
522
+ if not instance_image.mode == "RGB":
523
+ instance_image = instance_image.convert("RGB")
524
+ example["instance_images"] = self.image_transforms(instance_image)
525
+
526
+ if self.class_data_root:
527
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
528
+ class_image = exif_transpose(class_image)
529
+
530
+ if not class_image.mode == "RGB":
531
+ class_image = class_image.convert("RGB")
532
+ example["class_images"] = self.image_transforms(class_image)
533
+
534
+ return example
535
+
536
+
537
+ def collate_fn(examples, with_prior_preservation=False):
538
+ pixel_values = [example["instance_images"] for example in examples]
539
+
540
+ # Concat class and instance examples for prior preservation.
541
+ # We do this to avoid doing two forward passes.
542
+ if with_prior_preservation:
543
+ pixel_values += [example["class_images"] for example in examples]
544
+
545
+ pixel_values = torch.stack(pixel_values)
546
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
547
+
548
+ batch = {"pixel_values": pixel_values}
549
+ return batch
550
+
551
+
552
+ class PromptDataset(Dataset):
553
+ "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
554
+
555
+ def __init__(self, prompt, num_samples):
556
+ self.prompt = prompt
557
+ self.num_samples = num_samples
558
+
559
+ def __len__(self):
560
+ return self.num_samples
561
+
562
+ def __getitem__(self, index):
563
+ example = {}
564
+ example["prompt"] = self.prompt
565
+ example["index"] = index
566
+ return example
567
+
568
+
569
+ def tokenize_prompt(tokenizer, prompt):
570
+ text_inputs = tokenizer(
571
+ prompt,
572
+ padding="max_length",
573
+ max_length=tokenizer.model_max_length,
574
+ truncation=True,
575
+ return_tensors="pt",
576
+ )
577
+ text_input_ids = text_inputs.input_ids
578
+ return text_input_ids
579
+
580
+
581
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
582
+ def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
583
+ prompt_embeds_list = []
584
+
585
+ for i, text_encoder in enumerate(text_encoders):
586
+ if tokenizers is not None:
587
+ tokenizer = tokenizers[i]
588
+ text_input_ids = tokenize_prompt(tokenizer, prompt)
589
+ else:
590
+ assert text_input_ids_list is not None
591
+ text_input_ids = text_input_ids_list[i]
592
+
593
+ prompt_embeds = text_encoder(
594
+ text_input_ids.to(text_encoder.device),
595
+ output_hidden_states=True,
596
+ )
597
+
598
+ # We are only interested in the pooled output of the final text encoder
599
+ pooled_prompt_embeds = prompt_embeds[0]
600
+ prompt_embeds = prompt_embeds.hidden_states[-2]
601
+ bs_embed, seq_len, _ = prompt_embeds.shape
602
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
603
+ prompt_embeds_list.append(prompt_embeds)
604
+
605
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
606
+ pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
607
+ return prompt_embeds, pooled_prompt_embeds
608
+
609
+
610
+ def unet_attn_processors_state_dict(unet) -> Dict[str, torch.Tensor]:
611
+ """
612
+ Returns:
613
+ a state dict containing just the attention processor parameters.
614
+ """
615
+ attn_processors = unet.attn_processors
616
+
617
+ attn_processors_state_dict = {}
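+ # Keys are flattened to "<attn_processor_name>.<parameter_name>"; the resulting dict is what gets passed to save_lora_weights as unet_lora_layers.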
618
+
619
+ for attn_processor_key, attn_processor in attn_processors.items():
620
+ for parameter_key, parameter in attn_processor.state_dict().items():
621
+ attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter
622
+
623
+ return attn_processors_state_dict
624
+
625
+
626
+ def main(args):
627
+ logging_dir = Path(args.output_dir, args.logging_dir)
628
+
629
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
630
+
631
+ accelerator = Accelerator(
632
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
633
+ mixed_precision=args.mixed_precision,
634
+ log_with=args.report_to,
635
+ project_config=accelerator_project_config,
636
+ )
637
+
638
+ if args.report_to == "wandb":
639
+ if not is_wandb_available():
640
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
641
+ import wandb
642
+
643
+ # Make one log on every process with the configuration for debugging.
644
+ logging.basicConfig(
645
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
646
+ datefmt="%m/%d/%Y %H:%M:%S",
647
+ level=logging.INFO,
648
+ )
649
+ logger.info(accelerator.state, main_process_only=False)
650
+ if accelerator.is_local_main_process:
651
+ transformers.utils.logging.set_verbosity_warning()
652
+ diffusers.utils.logging.set_verbosity_info()
653
+ else:
654
+ transformers.utils.logging.set_verbosity_error()
655
+ diffusers.utils.logging.set_verbosity_error()
656
+
657
+ # If passed along, set the training seed now.
658
+ if args.seed is not None:
659
+ set_seed(args.seed)
660
+
661
+ # Generate class images if prior preservation is enabled.
662
+ if args.with_prior_preservation:
663
+ class_images_dir = Path(args.class_data_dir)
664
+ if not class_images_dir.exists():
665
+ class_images_dir.mkdir(parents=True)
666
+ cur_class_images = len(list(class_images_dir.iterdir()))
667
+
668
+ if cur_class_images < args.num_class_images:
669
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
670
+ if args.prior_generation_precision == "fp32":
671
+ torch_dtype = torch.float32
672
+ elif args.prior_generation_precision == "fp16":
673
+ torch_dtype = torch.float16
674
+ elif args.prior_generation_precision == "bf16":
675
+ torch_dtype = torch.bfloat16
676
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
677
+ args.pretrained_model_name_or_path,
678
+ torch_dtype=torch_dtype,
679
+ revision=args.revision,
680
+ )
681
+ pipeline.set_progress_bar_config(disable=True)
682
+
683
+ num_new_images = args.num_class_images - cur_class_images
684
+ logger.info(f"Number of class images to sample: {num_new_images}.")
685
+
686
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
687
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
688
+
689
+ sample_dataloader = accelerator.prepare(sample_dataloader)
690
+ pipeline.to(accelerator.device)
691
+
692
+ for example in tqdm(
693
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
694
+ ):
695
+ images = pipeline(example["prompt"]).images
696
+
697
+ for i, image in enumerate(images):
698
+ hash_image = hashlib.sha1(image.tobytes()).hexdigest()
699
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
700
+ image.save(image_filename)
701
+
702
+ del pipeline
703
+ if torch.cuda.is_available():
704
+ torch.cuda.empty_cache()
705
+
706
+ # Handle the repository creation
707
+ if accelerator.is_main_process:
708
+ if args.output_dir is not None:
709
+ os.makedirs(args.output_dir, exist_ok=True)
710
+
711
+ if args.push_to_hub:
712
+ repo_id = create_repo(
713
+ repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, private=True, token=args.hub_token
714
+ ).repo_id
715
+
716
+ # Load the tokenizers
717
+ tokenizer_one = AutoTokenizer.from_pretrained(
718
+ args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False
719
+ )
720
+ tokenizer_two = AutoTokenizer.from_pretrained(
721
+ args.pretrained_model_name_or_path, subfolder="tokenizer_2", revision=args.revision, use_fast=False
722
+ )
723
+
724
+ # import correct text encoder classes
725
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(
726
+ args.pretrained_model_name_or_path, args.revision
727
+ )
728
+ text_encoder_cls_two = import_model_class_from_model_name_or_path(
729
+ args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
730
+ )
731
+
732
+ # Load scheduler and models
733
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
734
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
735
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
736
+ )
737
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
738
+ args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision
739
+ )
740
+ vae_path = (
741
+ args.pretrained_model_name_or_path
742
+ if args.pretrained_vae_model_name_or_path is None
743
+ else args.pretrained_vae_model_name_or_path
744
+ )
745
+ vae = AutoencoderKL.from_pretrained(
746
+ vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision
747
+ )
748
+ unet = UNet2DConditionModel.from_pretrained(
749
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
750
+ )
751
+
752
+ # We only train the additional adapter LoRA layers
753
+ vae.requires_grad_(False)
754
+ text_encoder_one.requires_grad_(False)
755
+ text_encoder_two.requires_grad_(False)
756
+ unet.requires_grad_(False)
757
+
758
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
759
+ # as these weights are only used for inference, keeping weights in full precision is not required.
760
+ weight_dtype = torch.float32
761
+ if accelerator.mixed_precision == "fp16":
762
+ weight_dtype = torch.float16
763
+ elif accelerator.mixed_precision == "bf16":
764
+ weight_dtype = torch.bfloat16
765
+
766
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
767
+ unet.to(accelerator.device, dtype=weight_dtype)
768
+
769
+ # The VAE is always in float32 to avoid NaN losses.
770
+ vae.to(accelerator.device, dtype=torch.float32)
771
+
772
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
773
+ text_encoder_two.to(accelerator.device, dtype=weight_dtype)
774
+
775
+ if args.enable_xformers_memory_efficient_attention:
776
+ if is_xformers_available():
777
+ import xformers
778
+
779
+ xformers_version = version.parse(xformers.__version__)
780
+ if xformers_version == version.parse("0.0.16"):
781
+ logger.warn(
782
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
783
+ )
784
+ unet.enable_xformers_memory_efficient_attention()
785
+ else:
786
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
787
+
788
+ if args.gradient_checkpointing:
789
+ unet.enable_gradient_checkpointing()
790
+ if args.train_text_encoder:
791
+ text_encoder_one.gradient_checkpointing_enable()
792
+ text_encoder_two.gradient_checkpointing_enable()
793
+
794
+ # now we will add new LoRA weights to the attention layers
795
+ # Set correct lora layers
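+ # cross_attention_dim is None for self-attention (attn1) processors; hidden_size is read from the UNet's block_out_channels depending on whether the block is a down, mid, or up block.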
796
+ unet_lora_attn_procs = {}
797
+ unet_lora_parameters = []
798
+ for name, attn_processor in unet.attn_processors.items():
799
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
800
+ if name.startswith("mid_block"):
801
+ hidden_size = unet.config.block_out_channels[-1]
802
+ elif name.startswith("up_blocks"):
803
+ block_id = int(name[len("up_blocks.")])
804
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
805
+ elif name.startswith("down_blocks"):
806
+ block_id = int(name[len("down_blocks.")])
807
+ hidden_size = unet.config.block_out_channels[block_id]
808
+
809
+ lora_attn_processor_class = (
810
+ LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
811
+ )
812
+ module = lora_attn_processor_class(
813
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.rank
814
+ )
815
+ unet_lora_attn_procs[name] = module
816
+ unet_lora_parameters.extend(module.parameters())
817
+
818
+ unet.set_attn_processor(unet_lora_attn_procs)
819
+
820
+ # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
821
+ # So, instead, we monkey-patch the forward calls of its attention-blocks.
822
+ if args.train_text_encoder:
823
+ # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
824
+ text_lora_parameters_one = LoraLoaderMixin._modify_text_encoder(
825
+ text_encoder_one, dtype=torch.float32, rank=args.rank
826
+ )
827
+ text_lora_parameters_two = LoraLoaderMixin._modify_text_encoder(
828
+ text_encoder_two, dtype=torch.float32, rank=args.rank
829
+ )
830
+
831
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
832
+ def save_model_hook(models, weights, output_dir):
833
+ if accelerator.is_main_process:
834
+ # There are only two options here: either just the unet attention processor layers,
835
+ # or both the unet and text encoder attention layers.
836
+ unet_lora_layers_to_save = None
837
+ text_encoder_one_lora_layers_to_save = None
838
+ text_encoder_two_lora_layers_to_save = None
839
+
840
+ for model in models:
841
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
842
+ unet_lora_layers_to_save = unet_attn_processors_state_dict(model)
843
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
844
+ text_encoder_one_lora_layers_to_save = text_encoder_lora_state_dict(model)
845
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
846
+ text_encoder_two_lora_layers_to_save = text_encoder_lora_state_dict(model)
847
+ else:
848
+ raise ValueError(f"unexpected save model: {model.__class__}")
849
+
850
+ # make sure to pop weight so that corresponding model is not saved again
851
+ weights.pop()
852
+
853
+ StableDiffusionXLPipeline.save_lora_weights(
854
+ output_dir,
855
+ unet_lora_layers=unet_lora_layers_to_save,
856
+ text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
857
+ text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
858
+ )
859
+
860
+ def load_model_hook(models, input_dir):
861
+ unet_ = None
862
+ text_encoder_one_ = None
863
+ text_encoder_two_ = None
864
+
865
+ while len(models) > 0:
866
+ model = models.pop()
867
+
868
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
869
+ unet_ = model
870
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
871
+ text_encoder_one_ = model
872
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
873
+ text_encoder_two_ = model
874
+ else:
875
+ raise ValueError(f"unexpected model to load: {model.__class__}")
876
+
877
+ lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
878
+ LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
879
+
880
+ text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k}
881
+ LoraLoaderMixin.load_lora_into_text_encoder(
882
+ text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
883
+ )
884
+
885
+ text_encoder_2_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder_2." in k}
886
+ LoraLoaderMixin.load_lora_into_text_encoder(
887
+ text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_
888
+ )
889
+
890
+ accelerator.register_save_state_pre_hook(save_model_hook)
891
+ accelerator.register_load_state_pre_hook(load_model_hook)
892
+
893
+ # Enable TF32 for faster training on Ampere GPUs,
894
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
895
+ if args.allow_tf32:
896
+ torch.backends.cuda.matmul.allow_tf32 = True
897
+
898
+ if args.scale_lr:
899
+ args.learning_rate = (
900
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
901
+ )
902
+
903
+ # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
904
+ if args.use_8bit_adam:
905
+ try:
906
+ import bitsandbytes as bnb
907
+ except ImportError:
908
+ raise ImportError(
909
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
910
+ )
911
+
912
+ optimizer_class = bnb.optim.AdamW8bit
913
+ else:
914
+ optimizer_class = torch.optim.AdamW
915
+
916
+ # Optimizer creation
917
+ params_to_optimize = (
918
+ itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
919
+ if args.train_text_encoder
920
+ else unet_lora_parameters
921
+ )
922
+ optimizer = optimizer_class(
923
+ params_to_optimize,
924
+ lr=args.learning_rate,
925
+ betas=(args.adam_beta1, args.adam_beta2),
926
+ weight_decay=args.adam_weight_decay,
927
+ eps=args.adam_epsilon,
928
+ )
929
+
930
+ # Computes additional embeddings/ids required by the SDXL UNet.
931
+ # regular text embeddings (when `train_text_encoder` is not True)
932
+ # pooled text embeddings
933
+ # time ids
934
+
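+ # The "time ids" are SDXL's additional conditioning: (original_size, crops_coords_top_left, target_size) packed into one tensor and later passed through added_cond_kwargs.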
935
+ def compute_time_ids():
936
+ # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
937
+ original_size = (args.resolution, args.resolution)
938
+ target_size = (args.resolution, args.resolution)
939
+ crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
940
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
941
+ add_time_ids = torch.tensor([add_time_ids])
942
+ add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
943
+ return add_time_ids
944
+
945
+ if not args.train_text_encoder:
946
+ tokenizers = [tokenizer_one, tokenizer_two]
947
+ text_encoders = [text_encoder_one, text_encoder_two]
948
+
949
+ def compute_text_embeddings(prompt, text_encoders, tokenizers):
950
+ with torch.no_grad():
951
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
952
+ prompt_embeds = prompt_embeds.to(accelerator.device)
953
+ pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
954
+ return prompt_embeds, pooled_prompt_embeds
955
+
956
+ # Handle instance prompt.
957
+ instance_time_ids = compute_time_ids()
958
+ if not args.train_text_encoder:
959
+ instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings(
960
+ args.instance_prompt, text_encoders, tokenizers
961
+ )
962
+
963
+ # Handle class prompt for prior-preservation.
964
+ if args.with_prior_preservation:
965
+ class_time_ids = compute_time_ids()
966
+ if not args.train_text_encoder:
967
+ class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings(
968
+ args.class_prompt, text_encoders, tokenizers
969
+ )
970
+
971
+ # Clear the memory here.
972
+ if not args.train_text_encoder:
973
+ del tokenizers, text_encoders
974
+ gc.collect()
975
+ torch.cuda.empty_cache()
976
+
977
+ # Pack the statically computed variables appropriately. This is so that we don't
978
+ # have to pass them to the dataloader.
979
+ add_time_ids = instance_time_ids
980
+ if args.with_prior_preservation:
981
+ add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0)
982
+
983
+ if not args.train_text_encoder:
984
+ prompt_embeds = instance_prompt_hidden_states
985
+ unet_add_text_embeds = instance_pooled_prompt_embeds
986
+ if args.with_prior_preservation:
987
+ prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
988
+ unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
989
+ else:
990
+ tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt)
991
+ tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt)
992
+ if args.with_prior_preservation:
993
+ class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt)
994
+ class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt)
995
+ tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
996
+ tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
997
+
998
+ # Dataset and DataLoaders creation:
999
+ train_dataset = DreamBoothDataset(
1000
+ instance_data_root=args.instance_data_dir,
1001
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1002
+ class_num=args.num_class_images,
1003
+ size=args.resolution,
1004
+ center_crop=args.center_crop,
1005
+ )
1006
+
1007
+ train_dataloader = torch.utils.data.DataLoader(
1008
+ train_dataset,
1009
+ batch_size=args.train_batch_size,
1010
+ shuffle=True,
1011
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1012
+ num_workers=args.dataloader_num_workers,
1013
+ )
1014
+
1015
+ # Scheduler and math around the number of training steps.
1016
+ overrode_max_train_steps = False
1017
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1018
+ if args.max_train_steps is None:
1019
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1020
+ overrode_max_train_steps = True
1021
+
1022
+ lr_scheduler = get_scheduler(
1023
+ args.lr_scheduler,
1024
+ optimizer=optimizer,
1025
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1026
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1027
+ num_cycles=args.lr_num_cycles,
1028
+ power=args.lr_power,
1029
+ )
1030
+
1031
+ # Prepare everything with our `accelerator`.
1032
+ if args.train_text_encoder:
1033
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1034
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
1035
+ )
1036
+ else:
1037
+ unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1038
+ unet, optimizer, train_dataloader, lr_scheduler
1039
+ )
1040
+
1041
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1042
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1043
+ if overrode_max_train_steps:
1044
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1045
+ # Afterwards we recalculate our number of training epochs
1046
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1047
+
1048
+ # We need to initialize the trackers we use, and also store our configuration.
1049
+ # The trackers initialize automatically on the main process.
1050
+ if accelerator.is_main_process:
1051
+ accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args))
1052
+
1053
+ # Train!
1054
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1055
+
1056
+ logger.info("***** Running training *****")
1057
+ logger.info(f" Num examples = {len(train_dataset)}")
1058
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1059
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1060
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1061
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1062
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1063
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1064
+ global_step = 0
1065
+ first_epoch = 0
1066
+
1067
+ # Potentially load in the weights and states from a previous save
1068
+ if args.resume_from_checkpoint:
1069
+ if args.resume_from_checkpoint != "latest":
1070
+ path = os.path.basename(args.resume_from_checkpoint)
1071
+ else:
1072
+ # Get the most recent checkpoint
1073
+ dirs = os.listdir(args.output_dir)
1074
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1075
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1076
+ path = dirs[-1] if len(dirs) > 0 else None
1077
+
1078
+ if path is None:
1079
+ accelerator.print(
1080
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1081
+ )
1082
+ args.resume_from_checkpoint = None
1083
+ else:
1084
+ accelerator.print(f"Resuming from checkpoint {path}")
1085
+ accelerator.load_state(os.path.join(args.output_dir, path))
1086
+ global_step = int(path.split("-")[1])
1087
+
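+ # global_step counts optimizer updates; multiplying by gradient_accumulation_steps converts back to dataloader steps so the right number of batches is skipped in the resumed epoch.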
1088
+ resume_global_step = global_step * args.gradient_accumulation_steps
1089
+ first_epoch = global_step // num_update_steps_per_epoch
1090
+ resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
1091
+
1092
+ # Only show the progress bar once on each machine.
1093
+ progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
1094
+ progress_bar.set_description("Steps")
1095
+
1096
+ for epoch in range(first_epoch, args.num_train_epochs):
1097
+ unet.train()
1098
+ if args.train_text_encoder:
1099
+ text_encoder_one.train()
1100
+ text_encoder_two.train()
1101
+ for step, batch in enumerate(train_dataloader):
1102
+ # Skip steps until we reach the resumed step
1103
+ if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
1104
+ if step % args.gradient_accumulation_steps == 0:
1105
+ progress_bar.update(1)
1106
+ continue
1107
+
1108
+ with accelerator.accumulate(unet):
1109
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
1110
+
1111
+ # Convert images to latent space
1112
+ model_input = vae.encode(pixel_values).latent_dist.sample()
1113
+ model_input = model_input * vae.config.scaling_factor
1114
+ if args.pretrained_vae_model_name_or_path is None:
1115
+ model_input = model_input.to(weight_dtype)
1116
+
1117
+ # Sample noise that we'll add to the latents
1118
+ noise = torch.randn_like(model_input)
1119
+ bsz = model_input.shape[0]
1120
+ # Sample a random timestep for each image
1121
+ timesteps = torch.randint(
1122
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1123
+ )
1124
+ timesteps = timesteps.long()
1125
+
1126
+ # Add noise to the model input according to the noise magnitude at each timestep
1127
+ # (this is the forward diffusion process)
1128
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1129
+
1130
+ # Calculate the elements to repeat depending on the use of prior-preservation.
1131
+ elems_to_repeat = bsz // 2 if args.with_prior_preservation else bsz
1132
+
1133
+ # Predict the noise residual
1134
+ if not args.train_text_encoder:
1135
+ unet_added_conditions = {
1136
+ "time_ids": add_time_ids.repeat(elems_to_repeat, 1),
1137
+ "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat, 1),
1138
+ }
1139
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat, 1, 1)
1140
+ model_pred = unet(
1141
+ noisy_model_input,
1142
+ timesteps,
1143
+ prompt_embeds_input,
1144
+ added_cond_kwargs=unet_added_conditions,
1145
+ ).sample
1146
+ else:
1147
+ unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat, 1)}
1148
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(
1149
+ text_encoders=[text_encoder_one, text_encoder_two],
1150
+ tokenizers=None,
1151
+ prompt=None,
1152
+ text_input_ids_list=[tokens_one, tokens_two],
1153
+ )
1154
+ unet_added_conditions.update({"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat, 1)})
1155
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat, 1, 1)
1156
+ model_pred = unet(
1157
+ noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions
1158
+ ).sample
1159
+
1160
+ # Get the target for loss depending on the prediction type
1161
+ if noise_scheduler.config.prediction_type == "epsilon":
1162
+ target = noise
1163
+ elif noise_scheduler.config.prediction_type == "v_prediction":
1164
+ target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1165
+ else:
1166
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1167
+
1168
+ if args.with_prior_preservation:
1169
+ # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1170
+ model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1171
+ target, target_prior = torch.chunk(target, 2, dim=0)
1172
+
1173
+ # Compute instance loss
1174
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1175
+
1176
+ # Compute prior loss
1177
+ prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
1178
+
1179
+ # Add the prior loss to the instance loss.
1180
+ loss = loss + args.prior_loss_weight * prior_loss
1181
+ else:
1182
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1183
+
1184
+ accelerator.backward(loss)
1185
+ if accelerator.sync_gradients:
1186
+ params_to_clip = (
1187
+ itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
1188
+ if args.train_text_encoder
1189
+ else unet_lora_parameters
1190
+ )
1191
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1192
+ optimizer.step()
1193
+ lr_scheduler.step()
1194
+ optimizer.zero_grad()
1195
+
1196
+ # Checks if the accelerator has performed an optimization step behind the scenes
1197
+ if accelerator.sync_gradients:
1198
+ progress_bar.update(1)
1199
+ global_step += 1
1200
+
1201
+ if accelerator.is_main_process:
1202
+ if global_step % args.checkpointing_steps == 0:
1203
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1204
+ if args.checkpoints_total_limit is not None:
1205
+ checkpoints = os.listdir(args.output_dir)
1206
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1207
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1208
+
1209
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1210
+ if len(checkpoints) >= args.checkpoints_total_limit:
1211
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1212
+ removing_checkpoints = checkpoints[0:num_to_remove]
1213
+
1214
+ logger.info(
1215
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1216
+ )
1217
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1218
+
1219
+ for removing_checkpoint in removing_checkpoints:
1220
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1221
+ shutil.rmtree(removing_checkpoint)
1222
+
1223
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1224
+ accelerator.save_state(save_path)
1225
+ logger.info(f"Saved state to {save_path}")
1226
+
1227
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1228
+ progress_bar.set_postfix(**logs)
1229
+ accelerator.log(logs, step=global_step)
1230
+
1231
+ if global_step >= args.max_train_steps:
1232
+ break
1233
+
1234
+ if accelerator.is_main_process:
1235
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1236
+ logger.info(
1237
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1238
+ f" {args.validation_prompt}."
1239
+ )
1240
+ # create pipeline
1241
+ if not args.train_text_encoder:
1242
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1243
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
1244
+ )
1245
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
1246
+ args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision
1247
+ )
1248
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1249
+ args.pretrained_model_name_or_path,
1250
+ vae=vae,
1251
+ text_encoder=accelerator.unwrap_model(text_encoder_one),
1252
+ text_encoder_2=accelerator.unwrap_model(text_encoder_two),
1253
+ unet=accelerator.unwrap_model(unet),
1254
+ revision=args.revision,
1255
+ torch_dtype=weight_dtype,
1256
+ )
1257
+
1258
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1259
+ scheduler_args = {}
1260
+
1261
+ if "variance_type" in pipeline.scheduler.config:
1262
+ variance_type = pipeline.scheduler.config.variance_type
1263
+
1264
+ if variance_type in ["learned", "learned_range"]:
1265
+ variance_type = "fixed_small"
1266
+
1267
+ scheduler_args["variance_type"] = variance_type
1268
+
1269
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
1270
+ pipeline.scheduler.config, **scheduler_args
1271
+ )
1272
+
1273
+ pipeline = pipeline.to(accelerator.device)
1274
+ pipeline.set_progress_bar_config(disable=True)
1275
+
1276
+ # run inference
1277
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
1278
+ pipeline_args = {"prompt": args.validation_prompt}
1279
+
1280
+ with torch.cuda.amp.autocast():
1281
+ images = [
1282
+ pipeline(**pipeline_args, generator=generator).images[0]
1283
+ for _ in range(args.num_validation_images)
1284
+ ]
1285
+
1286
+ for tracker in accelerator.trackers:
1287
+ if tracker.name == "tensorboard":
1288
+ np_images = np.stack([np.asarray(img) for img in images])
1289
+ tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
1290
+ if tracker.name == "wandb":
1291
+ tracker.log(
1292
+ {
1293
+ "validation": [
1294
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1295
+ for i, image in enumerate(images)
1296
+ ]
1297
+ }
1298
+ )
1299
+
1300
+ del pipeline
1301
+ torch.cuda.empty_cache()
1302
+
1303
+ # Save the lora layers
1304
+ accelerator.wait_for_everyone()
1305
+ if accelerator.is_main_process:
1306
+ unet = accelerator.unwrap_model(unet)
1307
+ unet = unet.to(torch.float32)
1308
+ unet_lora_layers = unet_attn_processors_state_dict(unet)
1309
+
1310
+ if args.train_text_encoder:
1311
+ text_encoder_one = accelerator.unwrap_model(text_encoder_one)
1312
+ text_encoder_lora_layers = text_encoder_lora_state_dict(text_encoder_one.to(torch.float32))
1313
+ text_encoder_two = accelerator.unwrap_model(text_encoder_two)
1314
+ text_encoder_2_lora_layers = text_encoder_lora_state_dict(text_encoder_two.to(torch.float32))
1315
+ else:
1316
+ text_encoder_lora_layers = None
1317
+ text_encoder_2_lora_layers = None
1318
+
1319
+ StableDiffusionXLPipeline.save_lora_weights(
1320
+ save_directory=args.output_dir,
1321
+ unet_lora_layers=unet_lora_layers,
1322
+ text_encoder_lora_layers=text_encoder_lora_layers,
1323
+ text_encoder_2_lora_layers=text_encoder_2_lora_layers,
1324
+ )
1325
+
1326
+ # Final inference
1327
+ # Load previous pipeline
1328
+ vae = AutoencoderKL.from_pretrained(
1329
+ vae_path,
1330
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
1331
+ revision=args.revision,
1332
+ torch_dtype=weight_dtype,
1333
+ )
1334
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1335
+ args.pretrained_model_name_or_path, vae=vae, revision=args.revision, torch_dtype=weight_dtype
1336
+ )
1337
+
1338
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1339
+ scheduler_args = {}
1340
+
1341
+ if "variance_type" in pipeline.scheduler.config:
1342
+ variance_type = pipeline.scheduler.config.variance_type
1343
+
1344
+ if variance_type in ["learned", "learned_range"]:
1345
+ variance_type = "fixed_small"
1346
+
1347
+ scheduler_args["variance_type"] = variance_type
1348
+
1349
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
1350
+
1351
+ # load attention processors
1352
+ pipeline.load_lora_weights(args.output_dir)
1353
+
1354
+ # run inference
1355
+ images = []
1356
+ if args.validation_prompt and args.num_validation_images > 0:
1357
+ pipeline = pipeline.to(accelerator.device)
1358
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
1359
+ images = [
1360
+ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
1361
+ for _ in range(args.num_validation_images)
1362
+ ]
1363
+
1364
+ for tracker in accelerator.trackers:
1365
+ if tracker.name == "tensorboard":
1366
+ np_images = np.stack([np.asarray(img) for img in images])
1367
+ tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
1368
+ if tracker.name == "wandb":
1369
+ tracker.log(
1370
+ {
1371
+ "test": [
1372
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1373
+ for i, image in enumerate(images)
1374
+ ]
1375
+ }
1376
+ )
1377
+
1378
+ if args.push_to_hub:
1379
+ save_model_card(
1380
+ repo_id,
1381
+ images=images,
1382
+ dataset_id=args.dataset_id,
1383
+ base_model=args.pretrained_model_name_or_path,
1384
+ train_text_encoder=args.train_text_encoder,
1385
+ prompt=args.instance_prompt,
1386
+ repo_folder=args.output_dir,
1387
+ vae_path=args.pretrained_vae_model_name_or_path,
+ )
1390
+ upload_folder(
1391
+ repo_id=repo_id,
1392
+ folder_path=args.output_dir,
1393
+ commit_message="End of training",
1394
+ ignore_patterns=["step_*", "epoch_*"],
1395
+ token=args.hub_token
1396
+ )
1397
+
1398
+ accelerator.end_training()
1399
+
1400
+
1401
+ if __name__ == "__main__":
1402
+ args = parse_args()
1403
+ main(args)
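
For reference, a minimal sketch of driving the script programmatically rather than from the command line. The base model is the SDXL checkpoint already referenced in the embedded model card, while the dataset ID, data folder, and prompt below are placeholders, and only a subset of the parser's arguments is shown:

```python
# Minimal programmatic usage sketch (placeholder paths/prompts): build the argument
# list exactly as the argparse parser above expects, then hand it to main().
from train_dreambooth_lora_sdxl import parse_args, main

args = parse_args(
    [
        "--pretrained_model_name_or_path", "stabilityai/stable-diffusion-xl-base-1.0",
        "--dataset_id", "my-dataset",                 # placeholder dataset ID
        "--instance_data_dir", "./instance_images",   # folder with the training images
        "--instance_prompt", "a photo of sks dog",    # placeholder instance prompt
        "--output_dir", "lora-dreambooth-model",
        "--resolution", "1024",
        "--train_batch_size", "1",
        "--gradient_accumulation_steps", "4",
        "--learning_rate", "1e-4",
        "--max_train_steps", "500",
        "--mixed_precision", "fp16",
    ]
)
main(args)
```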