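"""Training script for a T2I-Adapter (https://arxiv.org/abs/2302.08453) on Stable Diffusion XL.

Only the adapter is trained; the VAE, UNet, and both text encoders stay frozen. The adapter
maps a conditioning image to residuals that are added to the UNet down-block activations.
"""
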
import argparse
import functools
import gc
import logging
import math
import os
import random
import shutil
from pathlib import Path

import accelerate
import numpy as np
import torch
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionXLAdapterPipeline,
    T2IAdapter,
    UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.torch_utils import is_compiled_module


MAX_SEQ_LENGTH = 77

if is_wandb_available():
    import wandb

# Will raise an error if the minimal version of diffusers is not installed.
check_min_version("0.36.0.dev0")

logger = get_logger(__name__)


def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def log_validation(vae, unet, adapter, args, accelerator, weight_dtype, step):
    logger.info("Running validation...")

    adapter = accelerator.unwrap_model(adapter)

    pipeline = StableDiffusionXLAdapterPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        vae=vae,
        unet=unet,
        adapter=adapter,
        revision=args.revision,
        variant=args.variant,
        torch_dtype=weight_dtype,
    )
    pipeline = pipeline.to(accelerator.device)
    pipeline.set_progress_bar_config(disable=True)

    if args.enable_xformers_memory_efficient_attention:
        pipeline.enable_xformers_memory_efficient_attention()

    if args.seed is None:
        generator = None
    else:
        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)

    if len(args.validation_image) == len(args.validation_prompt):
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt
    elif len(args.validation_image) == 1:
        validation_images = args.validation_image * len(args.validation_prompt)
        validation_prompts = args.validation_prompt
    elif len(args.validation_prompt) == 1:
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt * len(args.validation_image)
    else:
        raise ValueError(
            "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
        )

    image_logs = []

    for validation_prompt, validation_image in zip(validation_prompts, validation_images):
        validation_image = Image.open(validation_image).convert("RGB")
        validation_image = validation_image.resize((args.resolution, args.resolution))

        images = []

        for _ in range(args.num_validation_images):
            with torch.autocast("cuda"):
                image = pipeline(
                    prompt=validation_prompt, image=validation_image, num_inference_steps=20, generator=generator
                ).images[0]
            images.append(image)

        image_logs.append(
            {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
        )

    for tracker in accelerator.trackers:
        if tracker.name == "tensorboard":
            for log in image_logs:
                images = log["images"]
                validation_prompt = log["validation_prompt"]
                validation_image = log["validation_image"]

                formatted_images = [np.asarray(validation_image)]

                for image in images:
                    formatted_images.append(np.asarray(image))

                formatted_images = np.stack(formatted_images)

                tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
        elif tracker.name == "wandb":
            formatted_images = []

            for log in image_logs:
                images = log["images"]
                validation_prompt = log["validation_prompt"]
                validation_image = log["validation_image"]

                formatted_images.append(wandb.Image(validation_image, caption="adapter conditioning"))

                for image in images:
                    image = wandb.Image(image, caption=validation_prompt)
                    formatted_images.append(image)

            tracker.log({"validation": formatted_images})
        else:
            logger.warning(f"image logging not implemented for {tracker.name}")

    del pipeline
    gc.collect()
    torch.cuda.empty_cache()

    return image_logs


def import_model_class_from_model_name_or_path(
    pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
):
    text_encoder_config = PretrainedConfig.from_pretrained(
        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
    )
    model_class = text_encoder_config.architectures[0]

    if model_class == "CLIPTextModel":
        from transformers import CLIPTextModel

        return CLIPTextModel
    elif model_class == "CLIPTextModelWithProjection":
        from transformers import CLIPTextModelWithProjection

        return CLIPTextModelWithProjection
    else:
        raise ValueError(f"{model_class} is not supported.")


def save_model_card(repo_id: str, image_logs: list = None, base_model: str = None, repo_folder: str = None):
    img_str = ""
    if image_logs is not None:
        img_str = "You can find some example images below.\n"
        for i, log in enumerate(image_logs):
            images = log["images"]
            validation_prompt = log["validation_prompt"]
            validation_image = log["validation_image"]
            validation_image.save(os.path.join(repo_folder, "image_control.png"))
            img_str += f"prompt: {validation_prompt}\n"
            images = [validation_image] + images
            image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
            img_str += f"![images_{i}](./images_{i}.png)\n"

    model_description = f"""
# t2iadapter-{repo_id}

These are T2I-Adapter weights trained on {base_model} with a new type of conditioning.
{img_str}
"""
    model_card = load_or_create_model_card(
        repo_id_or_path=repo_id,
        from_training=True,
        license="creativeml-openrail-m",
        base_model=base_model,
        model_description=model_description,
        inference=True,
    )

    tags = [
        "stable-diffusion-xl",
        "stable-diffusion-xl-diffusers",
        "text-to-image",
        "diffusers",
        "t2iadapter",
        "diffusers-training",
    ]
    model_card = populate_model_card(model_card, tags=tags)

    model_card.save(os.path.join(repo_folder, "README.md"))


def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="Simple example of a T2I-Adapter training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--pretrained_vae_model_name_or_path",
        type=str,
        default=None,
        help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.",
    )
    parser.add_argument(
        "--adapter_model_name_or_path",
        type=str,
        default=None,
        help="Path to a pretrained adapter model or model identifier from huggingface.co/models."
        " If not specified, the adapter weights are initialized according to the SDXL configuration.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be"
            " float32 precision."
        ),
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="t2iadapter-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=1024,
        help=(
            "The resolution for input images; all images in the train/validation dataset will be resized to this"
            " resolution."
        ),
    )
    parser.add_argument(
        "--detection_resolution",
        type=int,
        default=None,
        help="The resolution for the detection (conditioning) images.",
    )
    parser.add_argument(
        "--crops_coords_top_left_h",
        type=int,
        default=0,
        help="Coordinate (the height) to be included in the crop coordinate embeddings needed by the SDXL UNet.",
    )
    parser.add_argument(
        "--crops_coords_top_left_w",
        type=int,
        default=0,
        help="Coordinate (the width) to be included in the crop coordinate embeddings needed by the SDXL UNet.",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`."
            " In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
            " Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
            " See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step-by-step"
            " instructions."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=3,
        help="Max number of checkpoints to store.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--lr_num_cycles",
        type=int,
        default=1,
        help="Number of hard resets of the lr in the cosine_with_restarts scheduler.",
    )
    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=1,
        help="Number of subprocesses to use for data loading.",
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or"
            " the flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--set_grads_to_none",
        action="store_true",
        help=(
            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
            " behaviors, so disable this argument if it causes any problems. More info:"
            " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
        ),
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset; leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
    )
    parser.add_argument(
        "--conditioning_image_column",
        type=str,
        default="conditioning_image",
        help="The column of the dataset containing the adapter conditioning image.",
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this"
            " value if set."
        ),
    )
    parser.add_argument(
        "--proportion_empty_prompts",
        type=float,
        default=0,
        help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
            " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
            " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
        ),
    )
    parser.add_argument(
        "--validation_image",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of paths to the T2I-Adapter conditioning images, evaluated every `--validation_steps`"
            " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s,"
            " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
            " `--validation_image` that will be used with all `--validation_prompt`s."
        ),
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
    )
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=100,
        help=(
            "Run validation every X steps. Validation consists of running each `--validation_prompt`"
            " `--num_validation_images` times and logging the resulting images."
        ),
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="sd_xl_train_t2iadapter",
        help=(
            "The `project_name` argument passed to Accelerator.init_trackers. For more information, see"
            " https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")

    if args.dataset_name is not None and args.train_data_dir is not None:
        raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")

    if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
        raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")

    if args.validation_prompt is not None and args.validation_image is None:
        raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")

    if args.validation_prompt is None and args.validation_image is not None:
        raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")

    if (
        args.validation_image is not None
        and args.validation_prompt is not None
        and len(args.validation_image) != 1
        and len(args.validation_prompt) != 1
        and len(args.validation_image) != len(args.validation_prompt)
    ):
        raise ValueError(
            "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
            " or the same number of `--validation_prompt`s and `--validation_image`s"
        )

    if args.resolution % 8 != 0:
        raise ValueError(
            "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the t2iadapter encoder."
        )

    return args


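# A minimal launch example (illustrative only: the dataset name and validation paths are
# placeholders, and the script filename is assumed to be train_t2i_adapter_sdxl.py; every
# flag used here is defined in parse_args above):
#
#   accelerate launch train_t2i_adapter_sdxl.py \
#     --pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
#     --dataset_name="<your-dataset-with-image/conditioning_image/text-columns>" \
#     --resolution=1024 \
#     --train_batch_size=1 \
#     --gradient_accumulation_steps=4 \
#     --learning_rate=1e-5 \
#     --max_train_steps=15000 \
#     --validation_image="<path/to/conditioning.png>" \
#     --validation_prompt="a prompt matching the conditioning image" \
#     --output_dir="t2iadapter-model"

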
def get_train_dataset(args, accelerator):
    # Get the datasets: you can either provide your own training files (see below)
    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            args.dataset_name,
            args.dataset_config_name,
            cache_dir=args.cache_dir,
        )
    else:
        if args.train_data_dir is not None:
            dataset = load_dataset(
                args.train_data_dir,
                cache_dir=args.cache_dir,
            )
        # See more about loading custom images at
        # https://huggingface.co/docs/datasets/image_dataset#imagefolder

    # Preprocessing the datasets.
    column_names = dataset["train"].column_names

    # Get the column names for input/target.
    if args.image_column is None:
        image_column = column_names[0]
        logger.info(f"image column defaulting to {image_column}")
    else:
        image_column = args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.caption_column is None:
        caption_column = column_names[1]
        logger.info(f"caption column defaulting to {caption_column}")
    else:
        caption_column = args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.conditioning_image_column is None:
        conditioning_image_column = column_names[2]
        logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
    else:
        conditioning_image_column = args.conditioning_image_column
        if conditioning_image_column not in column_names:
            raise ValueError(
                f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    with accelerator.main_process_first():
        train_dataset = dataset["train"].shuffle(seed=args.seed)
        if args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(args.max_train_samples))
    return train_dataset


# Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(prompt_batch, text_encoders, tokenizers, proportion_empty_prompts, is_train=True):
    prompt_embeds_list = []

    captions = []
    for caption in prompt_batch:
        if random.random() < proportion_empty_prompts:
            captions.append("")
        elif isinstance(caption, str):
            captions.append(caption)
        elif isinstance(caption, (list, np.ndarray)):
            # take a random caption if there are multiple
            captions.append(random.choice(caption) if is_train else caption[0])

    with torch.no_grad():
        for tokenizer, text_encoder in zip(tokenizers, text_encoders):
            text_inputs = tokenizer(
                captions,
                padding="max_length",
                max_length=tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            prompt_embeds = text_encoder(
                text_input_ids.to(text_encoder.device),
                output_hidden_states=True,
            )

            # We are only ALWAYS interested in the pooled output of the final text encoder
            pooled_prompt_embeds = prompt_embeds[0]
            prompt_embeds = prompt_embeds.hidden_states[-2]
            bs_embed, seq_len, _ = prompt_embeds.shape
            prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
            prompt_embeds_list.append(prompt_embeds)

    prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
    pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
    return prompt_embeds, pooled_prompt_embeds


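# Shape note (assuming the standard SDXL checkpoint layout, where the two text encoders
# have hidden sizes 768 and 1280): `encode_prompt` concatenates the per-encoder hidden
# states along the last axis into (batch, 77, 2048) prompt embeddings, while the pooled
# embedding is taken from the final text encoder and has shape (batch, 1280).

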
def prepare_train_dataset(dataset, accelerator):
    # Note: relies on the module-level `args` populated in the `__main__` block below.
    image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    conditioning_image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
        ]
    )

    def preprocess_train(examples):
        images = [image.convert("RGB") for image in examples[args.image_column]]
        images = [image_transforms(image) for image in images]

        conditioning_images = [image.convert("RGB") for image in examples[args.conditioning_image_column]]
        conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]

        examples["pixel_values"] = images
        examples["conditioning_pixel_values"] = conditioning_images

        return examples

    with accelerator.main_process_first():
        dataset = dataset.with_transform(preprocess_train)

    return dataset


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
    conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()

    prompt_ids = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples])

    add_text_embeds = torch.stack([torch.tensor(example["text_embeds"]) for example in examples])
    add_time_ids = torch.stack([torch.tensor(example["time_ids"]) for example in examples])

    return {
        "pixel_values": pixel_values,
        "conditioning_pixel_values": conditioning_pixel_values,
        "prompt_ids": prompt_ids,
        "unet_added_conditions": {"text_embeds": add_text_embeds, "time_ids": add_time_ids},
    }


def main(args):
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation.
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name,
                exist_ok=True,
                token=args.hub_token,
                private=True,
            ).repo_id

    # Load the tokenizers.
    tokenizer_one = AutoTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer",
        revision=args.revision,
        use_fast=False,
    )
    tokenizer_two = AutoTokenizer.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="tokenizer_2",
        revision=args.revision,
        use_fast=False,
    )

    # Import the correct text encoder classes.
    text_encoder_cls_one = import_model_class_from_model_name_or_path(
        args.pretrained_model_name_or_path, args.revision
    )
    text_encoder_cls_two = import_model_class_from_model_name_or_path(
        args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
    )

    # Load the scheduler and models.
    noise_scheduler = EulerDiscreteScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    text_encoder_one = text_encoder_cls_one.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
    )
    text_encoder_two = text_encoder_cls_two.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
    )
    vae_path = (
        args.pretrained_model_name_or_path
        if args.pretrained_vae_model_name_or_path is None
        else args.pretrained_vae_model_name_or_path
    )
    vae = AutoencoderKL.from_pretrained(
        vae_path,
        subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
        revision=args.revision,
        variant=args.variant,
    )
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
    )

    if args.adapter_model_name_or_path:
        logger.info("Loading existing adapter weights.")
        t2iadapter = T2IAdapter.from_pretrained(args.adapter_model_name_or_path)
    else:
        logger.info("Initializing t2iadapter weights.")
        t2iadapter = T2IAdapter(
            in_channels=3,
            channels=(320, 640, 1280, 1280),
            num_res_blocks=2,
            downscale_factor=16,
            adapter_type="full_adapter_xl",
        )

    # `accelerate` 0.16.0 and later has better support for customized saving.
    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
        # Create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format.
        def save_model_hook(models, weights, output_dir):
            i = len(weights) - 1

            while len(weights) > 0:
                weights.pop()
                model = models[i]

                sub_dir = "t2iadapter"
                model.save_pretrained(os.path.join(output_dir, sub_dir))

                i -= 1

        def load_model_hook(models, input_dir):
            while len(models) > 0:
                # pop models so that they are not loaded again
                model = models.pop()

                # load diffusers style into model
                load_model = T2IAdapter.from_pretrained(os.path.join(input_dir, "t2iadapter"))
                model.register_to_config(**load_model.config)

                model.load_state_dict(load_model.state_dict())
                del load_model

        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)

    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)
    t2iadapter.train()
    unet.train()

    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers

            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warning(
                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")

    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()

    # Check that all trainable models are in full precision.
    low_precision_error_string = (
        " Please make sure to always have all model weights in full float32 precision when starting training - even if"
        " doing mixed precision training, a copy of the weights should still be float32."
    )

    if unwrap_model(t2iadapter).dtype != torch.float32:
        raise ValueError(
            f"T2IAdapter loaded as datatype {unwrap_model(t2iadapter).dtype}. {low_precision_error_string}"
        )

    # Enable TF32 for faster training on Ampere GPUs,
    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs.
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )

        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW

    # Optimizer creation: only the adapter parameters are trained.
    params_to_optimize = t2iadapter.parameters()
    optimizer = optimizer_class(
        params_to_optimize,
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    # For mixed precision training we cast the text encoder and VAE weights to half-precision;
    # these models are only used for inference, so keeping weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Move vae, unet and text encoders to device and cast to weight_dtype.
    # The default SDXL VAE is kept in float32 to avoid NaN losses.
    if args.pretrained_vae_model_name_or_path is not None:
        vae.to(accelerator.device, dtype=weight_dtype)
    else:
        vae.to(accelerator.device, dtype=torch.float32)
    unet.to(accelerator.device, dtype=weight_dtype)
    text_encoder_one.to(accelerator.device, dtype=weight_dtype)
    text_encoder_two.to(accelerator.device, dtype=weight_dtype)

    # Here we compute not just the text embeddings but also the additional embeddings
    # the SDXL UNet needs (pooled text embeddings and size/crop time ids).
    def compute_embeddings(batch, proportion_empty_prompts, text_encoders, tokenizers, is_train=True):
        original_size = (args.resolution, args.resolution)
        target_size = (args.resolution, args.resolution)
        crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
        prompt_batch = batch[args.caption_column]

        prompt_embeds, pooled_prompt_embeds = encode_prompt(
            prompt_batch, text_encoders, tokenizers, proportion_empty_prompts, is_train
        )
        add_text_embeds = pooled_prompt_embeds

        # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
        add_time_ids = list(original_size + crops_coords_top_left + target_size)
        add_time_ids = torch.tensor([add_time_ids])

        prompt_embeds = prompt_embeds.to(accelerator.device)
        add_text_embeds = add_text_embeds.to(accelerator.device)
        add_time_ids = add_time_ids.repeat(len(prompt_batch), 1)
        add_time_ids = add_time_ids.to(accelerator.device, dtype=prompt_embeds.dtype)
        unet_added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}

        return {"prompt_embeds": prompt_embeds, **unet_added_cond_kwargs}

    def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
        sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype)
        schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device)
        timesteps = timesteps.to(accelerator.device)

        step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < n_dim:
            sigma = sigma.unsqueeze(-1)
        return sigma

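    # `get_sigmas` returns one sigma per sampled timestep, padded with trailing singleton
    # dimensions up to `n_dim` (e.g. (batch, 1, 1, 1) for n_dim=4) so that it broadcasts
    # against latents of shape (batch, channels, height, width) in the loss computation below.
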
    # Let's first compute all the embeddings so that we can free up the text encoders
    # from memory.
    text_encoders = [text_encoder_one, text_encoder_two]
    tokenizers = [tokenizer_one, tokenizer_two]
    train_dataset = get_train_dataset(args, accelerator)
    compute_embeddings_fn = functools.partial(
        compute_embeddings,
        proportion_empty_prompts=args.proportion_empty_prompts,
        text_encoders=text_encoders,
        tokenizers=tokenizers,
    )
    with accelerator.main_process_first():
        from datasets.fingerprint import Hasher

        # fingerprint used by the cache for the other processes to load the result
        # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401
        new_fingerprint = Hasher.hash(args)
        train_dataset = train_dataset.map(compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint)

    # Then get the training dataset ready to be passed to the dataloader.
    train_dataset = prepare_train_dataset(train_dataset, accelerator)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=True,
        collate_fn=collate_fn,
        batch_size=args.train_batch_size,
        num_workers=args.dataloader_num_workers,
    )

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps,
        num_training_steps=args.max_train_steps,
        num_cycles=args.lr_num_cycles,
        power=args.lr_power,
    )

    # Prepare everything with our `accelerator`.
    t2iadapter, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        t2iadapter, optimizer, train_dataloader, lr_scheduler
    )

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs.
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        tracker_config = dict(vars(args))

        # tensorboard cannot handle list types for config
        tracker_config.pop("validation_prompt")
        tracker_config.pop("validation_image")

        accelerator.init_trackers(args.tracker_project_name, config=tracker_config)

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save.
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split("-")[1])

            initial_global_step = global_step
            first_epoch = global_step // num_update_steps_per_epoch
    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        # Only show the progress bar once on each machine.
        disable=not accelerator.is_local_main_process,
    )

    image_logs = None
    for epoch in range(first_epoch, args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(t2iadapter):
                if args.pretrained_vae_model_name_or_path is not None:
                    pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
                else:
                    pixel_values = batch["pixel_values"]

                # encode pixel values in chunks of at most 8 images to avoid OOM
                latents = []
                for i in range(0, pixel_values.shape[0], 8):
                    latents.append(vae.encode(pixel_values[i : i + 8]).latent_dist.sample())
                latents = torch.cat(latents, dim=0)
                latents = latents * vae.config.scaling_factor
                if args.pretrained_vae_model_name_or_path is None:
                    latents = latents.to(weight_dtype)

                # Sample noise that we'll add to the latents.
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]

                # Cubic sampling to sample a random timestep for each image.
                # For more details about why cubic sampling is used, refer to section 3.4 of https://arxiv.org/abs/2302.08453
                timesteps = torch.rand((bsz,), device=latents.device)
                timesteps = (1 - timesteps**3) * noise_scheduler.config.num_train_timesteps
                timesteps = timesteps.long().to(noise_scheduler.timesteps.dtype)
                timesteps = timesteps.clamp(0, noise_scheduler.config.num_train_timesteps - 1)

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process).
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                # Scale the noisy latents for the UNet input.
                sigmas = get_sigmas(timesteps, len(noisy_latents.shape), noisy_latents.dtype)
                inp_noisy_latents = noisy_latents / ((sigmas**2 + 1) ** 0.5)

                # Adapter conditioning.
                t2iadapter_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
                down_block_additional_residuals = t2iadapter(t2iadapter_image)
                down_block_additional_residuals = [
                    sample.to(dtype=weight_dtype) for sample in down_block_additional_residuals
                ]

                # Predict the noise residual.
                model_pred = unet(
                    inp_noisy_latents,
                    timesteps,
                    encoder_hidden_states=batch["prompt_ids"],
                    added_cond_kwargs=batch["unet_added_conditions"],
                    down_block_additional_residuals=down_block_additional_residuals,
                    return_dict=False,
                )[0]

                # Denoise the latents.
                denoised_latents = model_pred * (-sigmas) + noisy_latents
                weighing = sigmas**-2.0

                # Get the target for loss depending on the prediction type.
                if noise_scheduler.config.prediction_type == "epsilon":
                    target = latents
                elif noise_scheduler.config.prediction_type == "v_prediction":
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                # MSE loss, weighted per sample by 1 / sigma^2.
                loss = torch.mean(
                    (weighing.float() * (denoised_latents.float() - target.float()) ** 2).reshape(target.shape[0], -1),
                    dim=1,
                )
                loss = loss.mean()

                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    params_to_clip = t2iadapter.parameters()
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=args.set_grads_to_none)

            # Checks if the accelerator has performed an optimization step behind the scenes.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                if accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

                    if args.validation_prompt is not None and global_step % args.validation_steps == 0:
                        image_logs = log_validation(
                            vae,
                            unet,
                            t2iadapter,
                            args,
                            accelerator,
                            weight_dtype,
                            global_step,
                        )

            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

    # Save the trained adapter and optionally push it to the Hub.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        t2iadapter = unwrap_model(t2iadapter)
        t2iadapter.save_pretrained(args.output_dir)

        if args.push_to_hub:
            save_model_card(
                repo_id,
                image_logs=image_logs,
                base_model=args.pretrained_model_name_or_path,
                repo_folder=args.output_dir,
            )
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    accelerator.end_training()


if __name__ == "__main__":
    args = parse_args()
    main(args)