# I2VEdit-pretrained-videos / item8/config_single_chunk.yaml
# Pretrained diffusers model path.
pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
# The folder where your training outputs will be placed.
output_dir: "./outputs"
seed: 23
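# Number of denoising steps used for sampling (and presumably also for DDIM inversion of the source video).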
num_steps: 25
# xFormers must be installed for best memory savings and performance (PyTorch < 2.0).
enable_xformers_memory_efficient_attention: True
# Use scaled dot-product attention (only available with PyTorch >= 2.0).
enable_torch_2_attn: True
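# The toggles below enable the individual stages of the I2VEdit pipeline. The short notes are
# assumptions based on the parameter names and the I2VEdit paper, not documentation from this file:
#   use_sarp                          - smooth area random perturbation applied before inversion
#   use_motion_lora                   - train/apply a motion LoRA for coarse motion alignment
#   use_inversed_latents              - start sampling from DDIM-inverted latents of the source video
#   use_attention_matching            - inject source attention maps for fine-grained appearance refinement
#   use_consistency_attention_control - cross-chunk consistency for long videos (off for a single chunk)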
use_sarp: true
use_motion_lora: true
train_motion_lora_only: false
retrain_motion_lora: false
use_inversed_latents: true
use_attention_matching: true
use_consistency_attention_control: false
dtype: fp16
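# Optionally dump stored attention maps for inspection; the step indices below presumably refer to
# denoising steps out of num_steps (25).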
visualize_attention_store: false
visualize_attention_store_steps: [0, 5, 10, 15, 20, 24]
save_last_frames: True
# data_params
data_params:
  video_path: "../datasets/svdedit/item8/source.mp4"
  keyframe_paths:
    - "../datasets/svdedit/item8/robot.png"
    - "../datasets/svdedit/item8/helmet.jpg"
    - "../datasets/svdedit/item8/jackma.png"
    - "../datasets/svdedit/item8/party_hat.png"
    - "../datasets/svdedit/item8/santa_hat.webp"
    - "../datasets/svdedit/item8/star.png"
  start_t: 0
  end_t: -1
  sample_fps: 8
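  # Presumably: frames are processed in windows of chunk_size frames, with overlay_size frames
  # shared between consecutive windows; this single-chunk config edits only one such window.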
  chunk_size: 16
  overlay_size: 1
  normalize: true
  output_fps: 8
  save_sampled_frame: true
  output_res: [576, 1024]
  pad_to_fit: true
train_motion_lora_params:
  cache_latents: true
  cached_latent_dir: null #/path/to/cached_latents
  lora_rank: 32
  # Use LoRA for the UNET model.
  use_unet_lora: True
  # LoRA dropout. This parameter sets the probability of randomly zeroing out elements. Helps prevent overfitting.
  # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
  lora_unet_dropout: 0.1
  # The only time you want this off is if you're doing full LoRA training.
  save_pretrained_model: False
  # Learning rate for AdamW
  learning_rate: 5e-4
  # Weight decay. Higher = more regularization. Lower = closer to dataset.
  adam_weight_decay: 1e-2
  # Maximum number of train steps. Model is saved after training.
  max_train_steps: 300
  # Saves a model every nth step.
  checkpointing_steps: 50
  # How many steps to do for validation if sample_preview is enabled.
  validation_steps: 50
  # Whether or not we want to use mixed precision with accelerate
  mixed_precision: "fp16"
  # Trades VRAM usage for speed. You lose roughly 20% of training speed, but save a lot of VRAM.
  # If you need to save more VRAM, it can also be enabled for the image encoder below, at a further cost in speed.
  gradient_checkpointing: True
  image_encoder_gradient_checkpointing: True
  train_data:
    # The width and height your training data will be resized to.
    width: 896
    height: 512
    # This will find the closest aspect ratio to your input width and height.
    # For example, a 512x512 width and height with a video of resolution 1280x720 will be resized to 512x256.
    use_data_aug: ~ #"controlnet"
    pad_to_fit: true
  validation_data:
    # Whether or not to sample preview during training (Requires more VRAM).
    sample_preview: True
    # The number of frames to sample during validation.
    num_frames: 14
    # Height and width of validation sample.
    width: 1024
    height: 576
    pad_to_fit: true
    # scale of spatial LoRAs, default is 0
    spatial_scale: 0
    # scale of noise prior, i.e. the scale of inversion noises
    noise_prior:
      #- 0.0
      - 1.0
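# SARP settings: the scale of random noise added to smooth regions of the source frames before
# inversion (description assumed from the I2VEdit paper's smooth area random perturbation).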
sarp_params:
  sarp_noise_scale: 0.005
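# Attention matching: attention maps stored from the source-video pass are re-injected while
# sampling each edit for appearance refinement (high-level description assumed from the paper).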
attention_matching_params:
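  # Which motion-LoRA checkpoint (by global step) to load from lora_dir; presumably one of the
  # checkpoints saved every checkpointing_steps steps during motion-LoRA training above.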
  best_checkpoint_index: 250
  lora_scale: 1.0
  # lora path
  lora_dir: "./cache/item8/train_motion_lora/"
  max_guidance_scale: 2.0
  disk_store: True
  load_attention_store: "./cache/item8/attention_store/"
  registered_modules:
    BasicTransformerBlock:
      - "attn1"
      #- "attn2"
    TemporalBasicTransformerBlock:
      - "attn1"
      #- "attn2"
  control_mode:
    spatial_self: "masked_copy"
    temporal_self: "copy_v2"
  cross_replace_steps: 0.0
  temporal_self_replace_steps: 1.0
  spatial_self_replace_steps: 1.0
  spatial_attention_chunk_size: 1
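  # Per-edit thresholds; edit0..edit5 presumably correspond, in order, to the six keyframe_paths
  # listed under data_params.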
  params:
    edit0:
      temporal_step_thr: [0.4, 0.5]
      mask_thr: [0.35, 0.35]
    edit1:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit2:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit3:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit4:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit5:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
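# Consistency-attention-control settings for multi-chunk (long) videos; with
# use_consistency_attention_control: false and a single chunk, these presumably have no effect here.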
long_video_params:
  mode: "skip-interval"
  registered_modules:
    BasicTransformerBlock:
      #- "attn1"
      #- "attn2"
    TemporalBasicTransformerBlock:
      - "attn1"
      #- "attn2"