| | """Modified from https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py |
| | """ |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
import argparse
import gc
import logging
import math
import os
import pickle
import random
import shutil
import sys
from typing import (Any, Callable, Dict, List, NamedTuple, Optional, Tuple,
                    Union)

import accelerate
import diffusers
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import torchvision.transforms.functional as TF
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import ProjectConfiguration, set_seed
from diffusers import DDIMScheduler, FlowMatchEulerDiscreteScheduler
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (EMAModel,
                                      compute_density_for_timestep_sampling,
                                      compute_loss_weighting_for_sd3)
from diffusers.utils import check_min_version, deprecate, is_wandb_available
from diffusers.utils.torch_utils import is_compiled_module
from einops import rearrange
from omegaconf import OmegaConf
from packaging import version
from PIL import Image
from torch.utils.data import RandomSampler
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from transformers.utils import ContextManagers

import datasets

current_file_path = os.path.abspath(__file__)
project_roots = [
    os.path.dirname(current_file_path),
    os.path.dirname(os.path.dirname(current_file_path)),
    os.path.dirname(os.path.dirname(os.path.dirname(current_file_path))),
]
for project_root in project_roots:
    if project_root not in sys.path:
        sys.path.insert(0, project_root)

from videox_fun.data.bucket_sampler import (ASPECT_RATIO_512,
                                            ASPECT_RATIO_RANDOM_CROP_512,
                                            ASPECT_RATIO_RANDOM_CROP_PROB,
                                            AspectRatioBatchImageVideoSampler,
                                            RandomSampler, get_closest_ratio)
from videox_fun.data.dataset_image_video import (ImageVideoDataset,
                                                 ImageVideoSampler,
                                                 get_random_mask)
from videox_fun.dist import set_multi_gpus_devices, shard_model
from videox_fun.models import (AutoencoderKL, AutoTokenizer,
                               CLIPImageProcessor,
                               CLIPVisionModelWithProjection,
                               Qwen2_5_VLForConditionalGeneration,
                               Qwen2Tokenizer, Qwen3ForCausalLM,
                               QwenImageTransformer2DModel,
                               ZImageTransformer2DModel)
from videox_fun.pipeline import Flux2Pipeline
from videox_fun.utils.discrete_sampler import DiscreteSampling
from videox_fun.utils.utils import get_image_to_video_latent, save_videos_grid

if is_wandb_available():
    import wandb


def filter_kwargs(cls, kwargs):
    import inspect
    sig = inspect.signature(cls.__init__)
    valid_params = set(sig.parameters.keys()) - {'self', 'cls'}
    filtered_kwargs = {k: v for k, v in kwargs.items() if k in valid_params}
    return filtered_kwargs
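

# linear_decay() interpolates linearly from `initial_value` at step 0 to
# `final_value` at `total_steps`, clamping afterwards.
# For example: linear_decay(1.0, 0.1, total_steps=100, current_step=50) == 0.55.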
def linear_decay(initial_value, final_value, total_steps, current_step):
    if current_step >= total_steps:
        return final_value
    current_step = max(0, current_step)
    step_size = (final_value - initial_value) / total_steps
    current_value = initial_value + step_size * current_step
    return current_value
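

# Samples timesteps whose logit is normally distributed: u ~ N(0, 1) is pushed
# through a sigmoid, mapped onto [low, high), and truncated to integers, which
# concentrates samples around the middle of the range.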
def generate_timestep_with_lognorm(low, high, shape, device="cpu", generator=None):
    u = torch.normal(mean=0.0, std=1.0, size=shape, device=device, generator=generator)
    t = 1 / (1 + torch.exp(-u)) * (high - low) + low
    return torch.clip(t.to(torch.int32), low, high - 1)
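

# Empirical fit for the scheduler shift `mu`: above a sequence length of 4300
# the 200-step linear fit (a2, b2) is used directly; below it, mu is linearly
# interpolated in `num_steps` between the 10-step fit (a1, b1) and the 200-step
# fit.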
def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float:
    a1, b1 = 8.73809524e-05, 1.89833333
    a2, b2 = 0.00016927, 0.45666666

    if image_seq_len > 4300:
        mu = a2 * image_seq_len + b2
        return float(mu)

    m_200 = a2 * image_seq_len + b2
    m_10 = a1 * image_seq_len + b1

    a = (m_200 - m_10) / 190.0
    b = m_200 - 200.0 * a
    mu = a * num_steps + b

    return float(mu)
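

# calculate_shift() linearly interpolates mu between `base_shift` at
# `base_seq_len` tokens and `max_shift` at `max_seq_len` tokens; e.g. with the
# defaults, image_seq_len=256 gives mu=0.5 and image_seq_len=4096 gives mu=1.15.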
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
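

# encode_prompt() wraps each prompt in the tokenizer's chat template, encodes
# the batch, and returns a list of variable-length embeddings: for each prompt,
# the second-to-last hidden layer restricted to non-padding tokens.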
def encode_prompt(
    prompt: Union[str, List[str]],
    device: Optional[torch.device] = None,
    text_encoder = None,
    tokenizer = None,
    max_sequence_length: int = 512,
) -> List[torch.FloatTensor]:
    if isinstance(prompt, str):
        prompt = [prompt]

    for i, prompt_item in enumerate(prompt):
        messages = [
            {"role": "user", "content": prompt_item},
        ]
        prompt_item = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=True,
        )
        prompt[i] = prompt_item

    text_inputs = tokenizer(
        prompt,
        padding="max_length",
        max_length=max_sequence_length,
        truncation=True,
        return_tensors="pt",
    )

    text_input_ids = text_inputs.input_ids.to(device)
    prompt_masks = text_inputs.attention_mask.to(device).bool()

    prompt_embeds = text_encoder(
        input_ids=text_input_ids,
        attention_mask=prompt_masks,
        output_hidden_states=True,
    ).hidden_states[-2]

    embeddings_list = []

    for i in range(len(prompt_embeds)):
        embeddings_list.append(prompt_embeds[i][prompt_masks[i]])

    return embeddings_list
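

# check_min_version raises an error if the installed diffusers version is older
# than the one this script was written against.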
check_min_version("0.18.0.dev0")

logger = get_logger(__name__, log_level="INFO")
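

# log_validation rebuilds a fresh inference pipeline from the current training
# weights, renders each validation prompt to a file under `<output_dir>/sample/`,
# and moves the training transformer back to the GPU afterwards; failures are
# caught so that a broken eval never kills training.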
def log_validation(vae, text_encoder, tokenizer, transformer3d, args, accelerator, weight_dtype, global_step):
    try:
        logger.info("Running validation... ")

        transformer3d_val = ZImageTransformer2DModel.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="transformer", torch_dtype=weight_dtype,
            low_cpu_mem_usage=True,
        ).to(weight_dtype)
        transformer3d_val.load_state_dict(accelerator.unwrap_model(transformer3d).state_dict())
        scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
            args.pretrained_model_name_or_path,
            subfolder="scheduler"
        )
        transformer3d = transformer3d.to("cpu")
        pipeline = Flux2Pipeline(
            vae=accelerator.unwrap_model(vae).to(weight_dtype),
            text_encoder=accelerator.unwrap_model(text_encoder),
            tokenizer=tokenizer,
            transformer=transformer3d_val,
            scheduler=scheduler,
        )
        pipeline = pipeline.to(accelerator.device)

        if args.seed is None:
            generator = None
        else:
            generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)

        for i in range(len(args.validation_prompts)):
            with torch.no_grad():
                sample = pipeline(
                    args.validation_prompts[i],
                    negative_prompt="bad detailed",
                    height=args.image_sample_size,
                    width=args.image_sample_size,
                    generator=generator
                ).images
            os.makedirs(os.path.join(args.output_dir, "sample"), exist_ok=True)
            sample[0].save(os.path.join(args.output_dir, f"sample/sample-{global_step}-image-{i}.gif"))

        del pipeline
        del transformer3d_val
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        transformer3d = transformer3d.to(accelerator.device)
    except Exception as e:
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        print(f"Eval error with info {e}")
        transformer3d = transformer3d.to(accelerator.device)


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1."
    )
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. fp16",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help="A folder containing the training data.",
    )
    parser.add_argument(
        "--train_data_meta",
        type=str,
        default=None,
        help="A csv containing the training data.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--validation_prompts",
        type=str,
        default=None,
        nargs="+",
        help="A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="sd-model-finetuned",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    parser.add_argument(
        "--use_came",
        action="store_true",
        help="whether to use the CAME optimizer",
    )
    parser.add_argument(
        "--multi_stream",
        action="store_true",
        help="whether to use cuda multi-stream",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--vae_mini_batch", type=int, default=32, help="Mini batch size for the VAE."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument(
        "--non_ema_revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or"
            " remote repository specified with --pretrained_model_name_or_path."
        ),
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--prediction_type",
        type=str,
        default=None,
        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
    )
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--report_model_info", action="store_true", help="Whether or not to report more info about the model (such as norm, grad)."
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help="Max number of checkpoints to store.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=5,
        help="Run validation every X epochs.",
    )
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=2000,
        help="Run validation every X steps.",
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="text2image-fine-tune",
        help=(
            "The `project_name` argument passed to Accelerator.init_trackers; for"
            " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )

    parser.add_argument(
        "--snr_loss", action="store_true", help="Whether or not to use snr_loss."
    )
    parser.add_argument(
        "--uniform_sampling", action="store_true", help="Whether or not to use uniform_sampling."
    )
    parser.add_argument(
        "--enable_text_encoder_in_dataloader", action="store_true", help="Whether or not to run the text encoder in the dataloader."
    )
    parser.add_argument(
        "--enable_bucket", action="store_true", help="Whether to enable bucket sampling in datasets."
    )
    parser.add_argument(
        "--random_ratio_crop", action="store_true", help="Whether to enable random-ratio crop sampling in datasets."
    )
    parser.add_argument(
        "--random_hw_adapt", action="store_true", help="Whether to enable random adaptation of height and width in datasets."
    )
    parser.add_argument(
        "--train_sampling_steps",
        type=int,
        default=1000,
        help="Number of discrete timesteps used when sampling sigmas for training.",
    )
    parser.add_argument(
        "--image_sample_size",
        type=int,
        default=512,
        help="Sample size of the image.",
    )
    parser.add_argument(
        "--fix_sample_size",
        nargs=2, type=int, default=None,
        help="Fixed sample size [height, width] when using bucket and collate_fn."
    )
    parser.add_argument(
        "--transformer_path",
        type=str,
        default=None,
        help="If you want to load weights from another transformer, input its path.",
    )
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        help="If you want to load weights from another VAE, input its path.",
    )

    parser.add_argument(
        '--trainable_modules',
        nargs='+',
        help='Enter a list of trainable modules'
    )
    parser.add_argument(
        '--trainable_modules_low_learning_rate',
        nargs='+',
        default=[],
        help='Enter a list of trainable modules with a lower learning rate'
    )
    parser.add_argument(
        '--tokenizer_max_length',
        type=int,
        default=512,
        help='Max length of tokenizer'
    )
    parser.add_argument(
        "--use_deepspeed", action="store_true", help="Whether or not to use deepspeed."
    )
    parser.add_argument(
        "--use_fsdp", action="store_true", help="Whether or not to use fsdp."
    )
    parser.add_argument(
        "--low_vram", action="store_true", help="Whether to enable low_vram mode."
    )
    parser.add_argument(
        "--prompt_template_encode",
        type=str,
        default="<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
        help="The prompt template for the text encoder.",
    )
    parser.add_argument(
        "--prompt_template_encode_start_idx",
        type=int,
        default=34,
        help="The start index for the prompt template.",
    )
    parser.add_argument(
        "--abnormal_norm_clip_start",
        type=int,
        default=1000,
        help="The step at which additional processing of abnormal gradients begins.",
    )
    parser.add_argument(
        "--initial_grad_norm_ratio",
        type=int,
        default=5,
        help="The initial gradient norm limit, expressed as a multiple of max_grad_norm.",
    )
    parser.add_argument(
        "--weighting_scheme",
        type=str,
        default="none",
        choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
        help='We default to the "none" weighting scheme for uniform sampling and uniform loss',
    )
    parser.add_argument(
        "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--mode_scale",
        type=float,
        default=1.29,
        help="Scale of mode weighting scheme. Only effective when using `'mode'` as the `weighting_scheme`.",
    )

    args = parser.parse_args()
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # default to using the same revision for the non-ema model if not specified
    if args.non_ema_revision is None:
        args.non_ema_revision = args.revision

    return args


def main():
    args = parse_args()

    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `huggingface-cli login` to authenticate with the Hub."
        )

    if args.non_ema_revision is not None:
        deprecate(
            "non_ema_revision!=None",
            "0.15.0",
            message=(
                "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to"
                " use `--variant=non_ema` instead."
            ),
        )
    logging_dir = os.path.join(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )
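
    # NOTE: zero_stage mirrors the DeepSpeed ZeRO stage and fsdp_stage the
    # roughly equivalent FSDP sharding level (FULL_SHARD ~ ZeRO-3,
    # SHARD_GRAD_OP ~ ZeRO-2); both stay 0 when the corresponding backend is off.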
| |
|
| | deepspeed_plugin = accelerator.state.deepspeed_plugin if hasattr(accelerator.state, "deepspeed_plugin") else None |
| | fsdp_plugin = accelerator.state.fsdp_plugin if hasattr(accelerator.state, "fsdp_plugin") else None |
| | if deepspeed_plugin is not None: |
| | zero_stage = int(deepspeed_plugin.zero_stage) |
| | fsdp_stage = 0 |
| | print(f"Using DeepSpeed Zero stage: {zero_stage}") |
| |
|
| | args.use_deepspeed = True |
| | if zero_stage == 3: |
| | print(f"Auto set save_state to True because zero_stage == 3") |
| | args.save_state = True |
| | elif fsdp_plugin is not None: |
| | from torch.distributed.fsdp import ShardingStrategy |
| | zero_stage = 0 |
| | if fsdp_plugin.sharding_strategy is ShardingStrategy.FULL_SHARD: |
| | fsdp_stage = 3 |
| | elif fsdp_plugin.sharding_strategy is None: |
| | fsdp_stage = 3 |
| | elif fsdp_plugin.sharding_strategy is ShardingStrategy.SHARD_GRAD_OP: |
| | fsdp_stage = 2 |
| | else: |
| | fsdp_stage = 0 |
| | print(f"Using FSDP stage: {fsdp_stage}") |
| |
|
| | args.use_fsdp = True |
| | if fsdp_stage == 3: |
| | print(f"Auto set save_state to True because fsdp_stage == 3") |
| | args.save_state = True |
| | else: |
| | zero_stage = 0 |
| | fsdp_stage = 0 |
| | print("DeepSpeed is not enabled.") |

    if accelerator.is_main_process:
        writer = SummaryWriter(log_dir=logging_dir)

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()

    if args.seed is not None:
        set_seed(args.seed)
        rng = np.random.default_rng(np.random.PCG64(args.seed + accelerator.process_index))
        torch_rng = torch.Generator(accelerator.device).manual_seed(args.seed + accelerator.process_index)
        print(f"Init rng with seed {args.seed + accelerator.process_index}. Process_index is {accelerator.process_index}")
    else:
        rng = None
        torch_rng = None
    index_rng = np.random.default_rng(np.random.PCG64(43))

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
        args.mixed_precision = accelerator.mixed_precision
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
        args.mixed_precision = accelerator.mixed_precision

    noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="scheduler"
    )

    tokenizer = AutoTokenizer.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="tokenizer"
    )

    def deepspeed_zero_init_disabled_context_manager():
        """
        Returns either a context list containing a single manager that disables
        zero.Init, or an empty context list.
        """
        deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
        if deepspeed_plugin is None:
            return []

        return [deepspeed_plugin.zero3_init_context_manager(enable=False)]
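
    # The frozen text encoder and VAE are instantiated with zero.Init disabled so
    # that ZeRO-3 does not shard their weights at load time; only the trainable
    # transformer goes through the usual DeepSpeed/FSDP preparation later on.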
    with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
        text_encoder = Qwen3ForCausalLM.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype
        )
        text_encoder = text_encoder.eval()

        vae = AutoencoderKL.from_pretrained(
            args.pretrained_model_name_or_path,
            subfolder="vae"
        ).to(weight_dtype)
        vae.eval()

    transformer3d = ZImageTransformer2DModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="transformer",
        torch_dtype=weight_dtype,
    ).to(weight_dtype)

    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    transformer3d.requires_grad_(False)

    if args.transformer_path is not None:
        print(f"From checkpoint: {args.transformer_path}")
        if args.transformer_path.endswith("safetensors"):
            from safetensors.torch import load_file, safe_open
            state_dict = load_file(args.transformer_path)
        else:
            state_dict = torch.load(args.transformer_path, map_location="cpu")
        state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

        m, u = transformer3d.load_state_dict(state_dict, strict=False)
        print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")
        assert len(u) == 0

    if args.vae_path is not None:
        print(f"From checkpoint: {args.vae_path}")
        if args.vae_path.endswith("safetensors"):
            from safetensors.torch import load_file, safe_open
            state_dict = load_file(args.vae_path)
        else:
            state_dict = torch.load(args.vae_path, map_location="cpu")
        state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict

        m, u = vae.load_state_dict(state_dict, strict=False)
        print(f"missing keys: {len(m)}, unexpected keys: {len(u)}")
        assert len(u) == 0

    transformer3d.train()
    if accelerator.is_main_process:
        accelerator.print(
            f"Trainable modules '{args.trainable_modules}'."
        )
    for name, param in transformer3d.named_parameters():
        for trainable_module_name in args.trainable_modules + args.trainable_modules_low_learning_rate:
            if trainable_module_name in name:
                param.requires_grad = True
                break

    if args.use_ema:
        if zero_stage == 3:
            raise NotImplementedError("EMA is not supported with DeepSpeed ZeRO Stage 3.")

        ema_transformer3d = ZImageTransformer2DModel.from_pretrained(
            args.pretrained_model_name_or_path,
            subfolder="transformer",
            torch_dtype=weight_dtype,
        ).to(weight_dtype)

        ema_transformer3d = EMAModel(ema_transformer3d.parameters(), model_cls=ZImageTransformer2DModel, model_config=ema_transformer3d.config)
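
    # The save/load hooks below persist, alongside the model weights, the
    # position of the bucket sampler (``sampler_pos_start.pkl``) so that a
    # resumed run continues from roughly the same place in the dataset.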
    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
        if fsdp_stage != 0:
            def save_model_hook(models, weights, output_dir):
                accelerate_state_dict = accelerator.get_state_dict(models[-1], unwrap=True)
                if accelerator.is_main_process:
                    from safetensors.torch import save_file

                    safetensor_save_path = os.path.join(output_dir, "diffusion_pytorch_model.safetensors")
                    accelerate_state_dict = {k: v.to(dtype=weight_dtype) for k, v in accelerate_state_dict.items()}
                    save_file(accelerate_state_dict, safetensor_save_path, metadata={"format": "pt"})

                    with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file:
                        pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file)

            def load_model_hook(models, input_dir):
                pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl")
                if os.path.exists(pkl_path):
                    with open(pkl_path, 'rb') as file:
                        loaded_number, _ = pickle.load(file)
                        batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0)
                    print(f"Load pkl from {pkl_path}. Get loaded_number = {loaded_number}.")

        elif zero_stage == 3:
            def save_model_hook(models, weights, output_dir):
                accelerate_state_dict = accelerator.get_state_dict(models[-1], unwrap=True)
                if accelerator.is_main_process:
                    from safetensors.torch import save_file
                    safetensor_save_path = os.path.join(output_dir, "diffusion_pytorch_model.safetensors")
                    save_file(accelerate_state_dict, safetensor_save_path, metadata={"format": "pt"})

                    with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file:
                        pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file)

            def load_model_hook(models, input_dir):
                pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl")
                if os.path.exists(pkl_path):
                    with open(pkl_path, 'rb') as file:
                        loaded_number, _ = pickle.load(file)
                        batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0)
                    print(f"Load pkl from {pkl_path}. Get loaded_number = {loaded_number}.")
        else:
            def save_model_hook(models, weights, output_dir):
                if accelerator.is_main_process:
                    if args.use_ema:
                        ema_transformer3d.save_pretrained(os.path.join(output_dir, "transformer_ema"))

                    models[0].save_pretrained(os.path.join(output_dir, "transformer"))
                    if not args.use_deepspeed:
                        weights.pop()

                    with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file:
                        pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file)

            def load_model_hook(models, input_dir):
                if args.use_ema:
                    ema_path = os.path.join(input_dir, "transformer_ema")
                    _, ema_kwargs = ZImageTransformer2DModel.load_config(ema_path, return_unused_kwargs=True)
                    load_model = ZImageTransformer2DModel.from_pretrained(
                        input_dir, subfolder="transformer_ema",
                    )
                    load_model = EMAModel(load_model.parameters(), model_cls=ZImageTransformer2DModel, model_config=load_model.config)
                    load_model.load_state_dict(ema_kwargs)

                    ema_transformer3d.load_state_dict(load_model.state_dict())
                    ema_transformer3d.to(accelerator.device)
                    del load_model

                for i in range(len(models)):
                    model = models.pop()

                    load_model = ZImageTransformer2DModel.from_pretrained(
                        input_dir, subfolder="transformer"
                    )
                    model.register_to_config(**load_model.config)

                    model.load_state_dict(load_model.state_dict())
                    del load_model

                pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl")
                if os.path.exists(pkl_path):
                    with open(pkl_path, 'rb') as file:
                        loaded_number, _ = pickle.load(file)
                        batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0)
                    print(f"Load pkl from {pkl_path}. Get loaded_number = {loaded_number}.")

        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)

    if args.gradient_checkpointing:
        transformer3d.enable_gradient_checkpointing()

    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True

    if args.scale_lr:
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )

        optimizer_cls = bnb.optim.AdamW8bit
    elif args.use_came:
        try:
            from came_pytorch import CAME
        except ImportError:
            raise ImportError(
                "Please install came_pytorch to use CAME. You can do so by running `pip install came_pytorch`"
            )

        optimizer_cls = CAME
    else:
        optimizer_cls = torch.optim.AdamW

    trainable_params = list(filter(lambda p: p.requires_grad, transformer3d.parameters()))
    trainable_params_optim = [
        {'params': [], 'lr': args.learning_rate},
        {'params': [], 'lr': args.learning_rate / 2},
    ]
    in_already = []
    for name, param in transformer3d.named_parameters():
        high_lr_flag = False
        if name in in_already:
            continue
        for trainable_module_name in args.trainable_modules:
            if trainable_module_name in name:
                in_already.append(name)
                high_lr_flag = True
                trainable_params_optim[0]['params'].append(param)
                if accelerator.is_main_process:
                    print(f"Set {name} to lr : {args.learning_rate}")
                break
        if high_lr_flag:
            continue
        for trainable_module_name in args.trainable_modules_low_learning_rate:
            if trainable_module_name in name:
                in_already.append(name)
                trainable_params_optim[1]['params'].append(param)
                if accelerator.is_main_process:
                    print(f"Set {name} to lr : {args.learning_rate / 2}")
                break

    if args.use_came:
        optimizer = optimizer_cls(
            trainable_params_optim,
            lr=args.learning_rate,
            betas=(0.9, 0.999, 0.9999),
            eps=(1e-30, 1e-16)
        )
    else:
        optimizer = optimizer_cls(
            trainable_params_optim,
            lr=args.learning_rate,
            betas=(args.adam_beta1, args.adam_beta2),
            weight_decay=args.adam_weight_decay,
            eps=args.adam_epsilon,
        )

    if args.fix_sample_size is not None and args.enable_bucket:
        args.image_sample_size = max(max(args.fix_sample_size), args.image_sample_size)
        args.random_hw_adapt = False

    train_dataset = ImageVideoDataset(
        args.train_data_meta, args.train_data_dir,
        image_sample_size=args.image_sample_size,
        enable_bucket=args.enable_bucket,
    )
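
    # Each dataloader worker reseeds numpy/random with a value derived from the
    # per-process seed, so augmentation randomness is reproducible per worker.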
    def worker_init_fn(_seed):
        _seed = _seed * 256
        def _worker_init_fn(worker_id):
            print(f"worker_init_fn with {_seed + worker_id}")
            np.random.seed(_seed + worker_id)
            random.seed(_seed + worker_id)
        return _worker_init_fn

    if args.enable_bucket:
        aspect_ratio_sample_size = {key: [x / 512 * args.image_sample_size for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
        batch_sampler_generator = torch.Generator().manual_seed(args.seed)
        batch_sampler = AspectRatioBatchImageVideoSampler(
            sampler=RandomSampler(train_dataset, generator=batch_sampler_generator), dataset=train_dataset.dataset,
            batch_size=args.train_batch_size, train_folder=args.train_data_dir, drop_last=True,
            aspect_ratios=aspect_ratio_sample_size,
        )

        def collate_fn(examples):
            def get_random_downsample_ratio(sample_size, image_ratio=[],
                                            all_choices=False, rng=None):
                def _create_special_list(length):
                    if length == 1:
                        return [1.0]
                    if length >= 2:
                        first_element = 0.90
                        remaining_sum = 1.0 - first_element
                        other_elements_value = remaining_sum / (length - 1)
                        special_list = [first_element] + [other_elements_value] * (length - 1)
                        return special_list

                if sample_size >= 1536:
                    number_list = [1, 1.25, 1.5, 2, 2.5, 3] + image_ratio
                elif sample_size >= 1024:
                    number_list = [1, 1.25, 1.5, 2] + image_ratio
                elif sample_size >= 768:
                    number_list = [1, 1.25, 1.5] + image_ratio
                elif sample_size >= 512:
                    number_list = [1] + image_ratio
                else:
                    number_list = [1]

                if all_choices:
                    return number_list

                number_list_prob = np.array(_create_special_list(len(number_list)))
                if rng is None:
                    return np.random.choice(number_list, p=number_list_prob)
                else:
                    return rng.choice(number_list, p=number_list_prob)

            new_examples = {}
            new_examples["pixel_values"] = []
            new_examples["text"] = []

            pixel_value = examples[0]["pixel_values"]
            data_type = examples[0]["data_type"]
            f, h, w, c = np.shape(pixel_value)

            random_downsample_ratio = 1 if not args.random_hw_adapt else get_random_downsample_ratio(args.image_sample_size)

            aspect_ratio_sample_size = {key: [x / 512 * args.image_sample_size / random_downsample_ratio for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
            aspect_ratio_random_crop_sample_size = {key: [x / 512 * args.image_sample_size / random_downsample_ratio for x in ASPECT_RATIO_RANDOM_CROP_512[key]] for key in ASPECT_RATIO_RANDOM_CROP_512.keys()}

            if args.fix_sample_size is not None:
                fix_sample_size = [int(x / 16) * 16 for x in args.fix_sample_size]
            elif args.random_ratio_crop:
                if rng is None:
                    random_sample_size = aspect_ratio_random_crop_sample_size[
                        np.random.choice(list(aspect_ratio_random_crop_sample_size.keys()), p=ASPECT_RATIO_RANDOM_CROP_PROB)
                    ]
                else:
                    random_sample_size = aspect_ratio_random_crop_sample_size[
                        rng.choice(list(aspect_ratio_random_crop_sample_size.keys()), p=ASPECT_RATIO_RANDOM_CROP_PROB)
                    ]
                random_sample_size = [int(x / 16) * 16 for x in random_sample_size]
            else:
                closest_size, closest_ratio = get_closest_ratio(h, w, ratios=aspect_ratio_sample_size)
                closest_size = [int(x / 16) * 16 for x in closest_size]

            for example in examples:
                if args.fix_sample_size is not None:
                    pixel_values = torch.from_numpy(example["pixel_values"]).permute(0, 3, 1, 2).contiguous()
                    pixel_values = pixel_values / 255.

                    fix_sample_size = list(map(lambda x: int(x), fix_sample_size))
                    transform = transforms.Compose([
                        transforms.Resize(fix_sample_size, interpolation=transforms.InterpolationMode.BILINEAR),
                        transforms.CenterCrop(fix_sample_size),
                        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
                    ])
                elif args.random_ratio_crop:
                    pixel_values = torch.from_numpy(example["pixel_values"]).permute(0, 3, 1, 2).contiguous()
                    pixel_values = pixel_values / 255.

                    b, c, h, w = pixel_values.size()
                    th, tw = random_sample_size
                    if th / tw > h / w:
                        nh = int(th)
                        nw = int(w / h * nh)
                    else:
                        nw = int(tw)
                        nh = int(h / w * nw)

                    transform = transforms.Compose([
                        transforms.Resize([nh, nw]),
                        transforms.CenterCrop([int(x) for x in random_sample_size]),
                        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
                    ])
                else:
                    pixel_values = torch.from_numpy(example["pixel_values"]).permute(0, 3, 1, 2).contiguous()
                    pixel_values = pixel_values / 255.

                    closest_size = list(map(lambda x: int(x), closest_size))
                    if closest_size[0] / h > closest_size[1] / w:
                        resize_size = closest_size[0], int(w * closest_size[0] / h)
                    else:
                        resize_size = int(h * closest_size[1] / w), closest_size[1]

                    transform = transforms.Compose([
                        transforms.Resize(resize_size, interpolation=transforms.InterpolationMode.BILINEAR),
                        transforms.CenterCrop(closest_size),
                        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
                    ])
                new_examples["pixel_values"].append(transform(pixel_values))
                new_examples["text"].append(example["text"])

            new_examples["pixel_values"] = torch.stack(new_examples["pixel_values"])

            if args.enable_text_encoder_in_dataloader:
                prompt_embeds = encode_prompt(
                    new_examples["text"], device="cpu",
                    text_encoder=text_encoder,
                    tokenizer=tokenizer,
                )

                new_examples['prompt_embeds'] = prompt_embeds

            return new_examples

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_sampler=batch_sampler,
            collate_fn=collate_fn,
            persistent_workers=True if args.dataloader_num_workers != 0 else False,
            num_workers=args.dataloader_num_workers,
            worker_init_fn=worker_init_fn(args.seed + accelerator.process_index)
        )
    else:
        batch_sampler_generator = torch.Generator().manual_seed(args.seed)
        batch_sampler = ImageVideoSampler(RandomSampler(train_dataset, generator=batch_sampler_generator), train_dataset, args.train_batch_size)
        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_sampler=batch_sampler,
            persistent_workers=True if args.dataloader_num_workers != 0 else False,
            num_workers=args.dataloader_num_workers,
            worker_init_fn=worker_init_fn(args.seed + accelerator.process_index)
        )

    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    transformer3d, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        transformer3d, optimizer, train_dataloader, lr_scheduler
    )

    if fsdp_stage != 0 or zero_stage != 0:
        from functools import partial

        from videox_fun.dist import set_multi_gpus_devices, shard_model

        shard_fn = partial(shard_model, device_id=accelerator.device, param_dtype=weight_dtype, module_to_wrapper=text_encoder.model.layers)
        text_encoder = shard_fn(text_encoder)

    if args.use_ema:
        ema_transformer3d.to(accelerator.device)

    vae.to(accelerator.device if not args.low_vram else "cpu", dtype=weight_dtype)
    if not args.enable_text_encoder_in_dataloader:
        text_encoder.to(accelerator.device if not args.low_vram else "cpu", dtype=weight_dtype)

    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    if accelerator.is_main_process:
        tracker_config = dict(vars(args))
        keys_to_pop = [k for k, v in tracker_config.items() if isinstance(v, list)]
        for k in keys_to_pop:
            tracker_config.pop(k)
            print(f"Removed tracker_config['{k}']")
        accelerator.init_trackers(args.tracker_project_name, tracker_config)

    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
        return model

    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            global_step = int(path.split("-")[1])

            initial_global_step = global_step

            pkl_path = os.path.join(os.path.join(args.output_dir, path), "sampler_pos_start.pkl")
            if os.path.exists(pkl_path):
                with open(pkl_path, 'rb') as file:
                    _, first_epoch = pickle.load(file)
                print(f"Load pkl from {pkl_path}. Get first_epoch = {first_epoch}.")
            else:
                first_epoch = global_step // num_update_steps_per_epoch

            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(args.output_dir, path))
    else:
        initial_global_step = 0

    progress_bar = tqdm(
        range(0, args.max_train_steps),
        initial=initial_global_step,
        desc="Steps",
        disable=not accelerator.is_local_main_process,
    )

    if args.multi_stream:
        vae_stream_1 = torch.cuda.Stream()
        vae_stream_2 = torch.cuda.Stream()
    else:
        vae_stream_1 = None
        vae_stream_2 = None
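
    # DiscreteSampling is a project-internal helper; it is assumed here to draw
    # integer timestep indices in [0, train_sampling_steps), optionally
    # synchronized across ranks when uniform_sampling is enabled.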
    idx_sampling = DiscreteSampling(args.train_sampling_steps, uniform_sampling=args.uniform_sampling)

    for epoch in range(first_epoch, args.num_train_epochs):
        train_loss = 0.0
        batch_sampler.sampler.generator = torch.Generator().manual_seed(args.seed + epoch)
        for step, batch in enumerate(train_dataloader):
            if epoch == first_epoch and step == 0:
                pixel_values, texts = batch['pixel_values'].cpu(), batch['text']
                pixel_values = rearrange(pixel_values, "b f c h w -> b c f h w")
                os.makedirs(os.path.join(args.output_dir, "sanity_check"), exist_ok=True)
                for idx, (pixel_value, text) in enumerate(zip(pixel_values, texts)):
                    pixel_value = pixel_value[None, ...]
                    gif_name = '-'.join(text.replace('/', '').split()[:10]) if not text == '' else f'{global_step}-{idx}'
                    save_videos_grid(pixel_value, f"{args.output_dir}/sanity_check/{gif_name[:10]}.gif", rescale=True)

            with accelerator.accumulate(transformer3d):
                pixel_values = batch["pixel_values"].to(weight_dtype)

                if args.low_vram:
                    torch.cuda.empty_cache()
                    vae.to(accelerator.device)
                    if not args.enable_text_encoder_in_dataloader:
                        text_encoder.to("cpu")

                with torch.no_grad():
                    def _batch_encode_vae(pixel_values):
                        pixel_values = pixel_values.squeeze(1)
                        bs = args.vae_mini_batch
                        new_pixel_values = []
                        for i in range(0, pixel_values.shape[0], bs):
                            pixel_values_bs = pixel_values[i : i + bs]
                            pixel_values_bs = vae.encode(pixel_values_bs)[0]
                            pixel_values_bs = pixel_values_bs.sample()
                            new_pixel_values.append(pixel_values_bs)
                        return torch.cat(new_pixel_values, dim=0)

                    if vae_stream_1 is not None:
                        vae_stream_1.wait_stream(torch.cuda.current_stream())
                        with torch.cuda.stream(vae_stream_1):
                            latents = _batch_encode_vae(pixel_values).unsqueeze(2)
                    else:
                        latents = _batch_encode_vae(pixel_values).unsqueeze(2)

                if vae_stream_1 is not None:
                    torch.cuda.current_stream().wait_stream(vae_stream_1)

                if args.low_vram:
                    vae.to('cpu')
                    torch.cuda.empty_cache()
                    if not args.enable_text_encoder_in_dataloader:
                        text_encoder.to(accelerator.device)

                if args.enable_text_encoder_in_dataloader:
                    prompt_embeds = batch['prompt_embeds'].to(dtype=latents.dtype, device=accelerator.device)
                else:
                    with torch.no_grad():
                        prompt_embeds = encode_prompt(
                            batch['text'], device=accelerator.device,
                            text_encoder=text_encoder,
                            tokenizer=tokenizer,
                        )

                if args.low_vram and not args.enable_text_encoder_in_dataloader:
                    text_encoder.to('cpu')
                    torch.cuda.empty_cache()

                bsz, channel, f, height, width = latents.size()
                latents = ((latents - vae.config.shift_factor) * vae.config.scaling_factor).to(dtype=weight_dtype)
                noise = torch.randn(latents.size(), device=latents.device, generator=torch_rng, dtype=weight_dtype)
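
                # Timestep indices are drawn either from the density implied by
                # --weighting_scheme (logit-normal / mode / cosmap) or uniformly
                # over the train_sampling_steps discrete steps.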
                if not args.uniform_sampling:
                    u = compute_density_for_timestep_sampling(
                        weighting_scheme=args.weighting_scheme,
                        batch_size=bsz,
                        logit_mean=args.logit_mean,
                        logit_std=args.logit_std,
                        mode_scale=args.mode_scale,
                    )
                    indices = (u * noise_scheduler.config.num_train_timesteps).long()
                else:
                    indices = idx_sampling(bsz, generator=torch_rng, device=latents.device)
                    indices = indices.long().cpu()

                sigmas = np.linspace(1.0, 1 / args.train_sampling_steps, args.train_sampling_steps)
                image_seq_len = latents.shape[1]
                mu = calculate_shift(
                    image_seq_len,
                    noise_scheduler.config.get("base_image_seq_len", 256),
                    noise_scheduler.config.get("max_image_seq_len", 4096),
                    noise_scheduler.config.get("base_shift", 0.5),
                    noise_scheduler.config.get("max_shift", 1.15),
                )
                noise_scheduler.set_timesteps(sigmas=sigmas, device=latents.device, mu=mu)
                timesteps = noise_scheduler.timesteps[indices].to(device=latents.device)

                def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
                    sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype)
                    schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device)
                    timesteps = timesteps.to(accelerator.device)
                    step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]

                    sigma = sigmas[step_indices].flatten()
                    while len(sigma.shape) < n_dim:
                        sigma = sigma.unsqueeze(-1)
                    return sigma
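
                # Rectified flow: x_t = (1 - sigma) * x_0 + sigma * noise, so the
                # velocity target the model regresses is dx_t/dsigma = noise - x_0.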
                sigmas = get_sigmas(timesteps, n_dim=latents.ndim, dtype=latents.dtype)
                noisy_latents = (1.0 - sigmas) * latents + sigmas * noise

                target = noise - latents

                timesteps = (1000 - timesteps) / 1000
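
                # NOTE: the transformer here is assumed to take time normalized to
                # [0, 1] and reversed relative to the scheduler's timesteps, hence
                # the (1000 - t) / 1000 remapping above.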
                with torch.cuda.amp.autocast(dtype=weight_dtype), torch.cuda.device(device=accelerator.device):
                    noise_pred = transformer3d(
                        x=noisy_latents,
                        t=timesteps,
                        cap_feats=prompt_embeds,
                    )[0]
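
                # custom_mse_loss masks out elements whose absolute error exceeds
                # `threshold` before averaging, guarding the update against rare
                # outlier values, and applies the SD3-style sigma weighting.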
                def custom_mse_loss(noise_pred, target, weighting=None, threshold=50):
                    noise_pred = noise_pred.float()
                    target = target.float()
                    diff = noise_pred - target
                    mse_loss = F.mse_loss(noise_pred, target, reduction='none')
                    mask = (diff.abs() <= threshold).float()
                    masked_loss = mse_loss * mask
                    if weighting is not None:
                        masked_loss = masked_loss * weighting
                    final_loss = masked_loss.mean()
                    return final_loss

                weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
                loss = custom_mse_loss(noise_pred.float(), target.float(), weighting.float())
                loss = loss.mean()

                avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
                train_loss += avg_loss.item() / args.gradient_accumulation_steps

                accelerator.backward(loss)
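
                # Without DeepSpeed/FSDP, the clipping threshold decays from
                # initial_grad_norm_ratio * max_grad_norm down to max_grad_norm over
                # the first abnormal_norm_clip_start steps (see linear_decay above),
                # and is tightened further when the observed norm is far above it.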
                if accelerator.sync_gradients:
                    if not args.use_deepspeed and not args.use_fsdp:
                        trainable_params_grads = [p.grad for p in trainable_params if p.grad is not None]
                        trainable_params_total_norm = torch.norm(torch.stack([torch.norm(g.detach(), 2) for g in trainable_params_grads]), 2)
                        max_grad_norm = linear_decay(args.max_grad_norm * args.initial_grad_norm_ratio, args.max_grad_norm, args.abnormal_norm_clip_start, global_step)
                        if trainable_params_total_norm / max_grad_norm > 5 and global_step > args.abnormal_norm_clip_start:
                            actual_max_grad_norm = max_grad_norm / min((trainable_params_total_norm / max_grad_norm), 10)
                        else:
                            actual_max_grad_norm = max_grad_norm
                    else:
                        actual_max_grad_norm = args.max_grad_norm

                    if not args.use_deepspeed and not args.use_fsdp and args.report_model_info and accelerator.is_main_process:
                        if trainable_params_total_norm > 1 and global_step > args.abnormal_norm_clip_start:
                            for name, param in transformer3d.named_parameters():
                                if param.requires_grad:
                                    writer.add_scalar(f'gradients/before_clip_norm/{name}', param.grad.norm(), global_step=global_step)

                    norm_sum = accelerator.clip_grad_norm_(trainable_params, actual_max_grad_norm)
                    if not args.use_deepspeed and not args.use_fsdp and args.report_model_info and accelerator.is_main_process:
                        writer.add_scalar('gradients/norm_sum', norm_sum, global_step=global_step)
                        writer.add_scalar('gradients/actual_max_grad_norm', actual_max_grad_norm, global_step=global_step)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            if accelerator.sync_gradients:
                if args.use_ema:
                    ema_transformer3d.step(transformer3d.parameters())
                progress_bar.update(1)
                global_step += 1
                accelerator.log({"train_loss": train_loss}, step=global_step)
                train_loss = 0.0

                if global_step % args.checkpointing_steps == 0:
                    if args.use_deepspeed or args.use_fsdp or accelerator.is_main_process:
                        # Before saving state, check if this save would exceed `checkpoints_total_limit`.
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)

                        gc.collect()
                        torch.cuda.empty_cache()
                        torch.cuda.ipc_collect()
                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")

                if accelerator.is_main_process:
                    if args.validation_prompts is not None and global_step % args.validation_steps == 0:
                        if args.use_ema:
                            # Store the transformer3d parameters temporarily and load the EMA parameters to perform inference.
                            ema_transformer3d.store(transformer3d.parameters())
                            ema_transformer3d.copy_to(transformer3d.parameters())
                        log_validation(
                            vae,
                            text_encoder,
                            tokenizer,
                            transformer3d,
                            args,
                            accelerator,
                            weight_dtype,
                            global_step,
                        )
                        if args.use_ema:
                            # Switch back to the original transformer3d parameters.
                            ema_transformer3d.restore(transformer3d.parameters())

            logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)

            if global_step >= args.max_train_steps:
                break

        if accelerator.is_main_process:
            if args.validation_prompts is not None and epoch % args.validation_epochs == 0:
                if args.use_ema:
                    # Store the transformer3d parameters temporarily and load the EMA parameters to perform inference.
                    ema_transformer3d.store(transformer3d.parameters())
                    ema_transformer3d.copy_to(transformer3d.parameters())
                log_validation(
                    vae,
                    text_encoder,
                    tokenizer,
                    transformer3d,
                    args,
                    accelerator,
                    weight_dtype,
                    global_step,
                )
                if args.use_ema:
                    # Switch back to the original transformer3d parameters.
                    ema_transformer3d.restore(transformer3d.parameters())

    accelerator.wait_for_everyone()
    if args.use_deepspeed or args.use_fsdp or accelerator.is_main_process:
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
        accelerator.save_state(save_path)
        logger.info(f"Saved state to {save_path}")

    accelerator.end_training()


if __name__ == "__main__":
    main()