# Pretrained diffusers model path.
pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
# The folder where your training outputs will be placed.
output_dir: "./outputs"
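# Random seed for reproducibility.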
seed: 23
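# Number of denoising steps at inference (25 is the SVD default in diffusers).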
num_steps: 25
# xformers must be installed for the best memory savings and performance (only needed for PyTorch < 2.0).
enable_xformers_memory_efficient_attention: True
# Use scaled dot-product attention (only available with PyTorch >= 2.0).
enable_torch_2_attn: True

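# Enable SARP; its noise scale is set under sarp_params below.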
use_sarp: true

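# Motion LoRA: train a LoRA on the source video and apply it at inference
# (settings under train_motion_lora_params). The two flags below presumably
# control whether only the LoRA is trained and whether an existing LoRA is retrained.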
use_motion_lora: true
train_motion_lora_only: false
retrain_motion_lora: false

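# Start sampling from inverted latents of the source video and enable attention
# matching between the source and edited branches (see attention_matching_params).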
use_inversed_latents: true
use_attention_matching: true
use_consistency_attention_control: false
dtype: fp16

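# Save the last generated frames; their latents can optionally be reloaded via the list below.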
save_last_frames: True
load_from_last_frames_latents: 
  # - "..path.."

# data_params
data_params:
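  # Source video to edit.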
  video_path: "../datasets/svdedit/item4/rocket.mp4"
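  # Edited first-frame keyframes, one per edit (they map to edit0, edit1, ... in attention_matching_params).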
  keyframe_paths:
    - "../datasets/svdedit/item4/blue_aircraft.png"
    - "../datasets/svdedit/item4/black_aircraft.png"
  start_t: 0
  end_t: 2
  sample_fps: 7
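  # Frames per generation chunk (14 is SVD-img2vid's native frame count) and the
  # number of overlapping frames between consecutive chunks.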
  chunk_size: 14
  overlay_size: 1
  normalize: true
  output_fps: 7
  save_sampled_frame: true
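  # Output resolution, presumably [height, width] to match SVD's native 576x1024.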
  output_res: [576, 1024]
  pad_to_fit: false

train_motion_lora_params:
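  # Pre-encode the training video into latents and cache them to speed up training.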
  cache_latents: true
  cached_latent_dir: null #/path/to/cached_latents
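  # Rank of the LoRA update matrices; higher ranks add capacity at the cost of VRAM.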
  lora_rank: 32
  # Use LoRA for the UNet model.
  use_unet_lora: True
  # LoRA dropout: the probability of randomly zeroing out elements. Helps prevent overfitting.
  # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
  lora_unet_dropout: 0.1
  # The only time you want this off is if you're doing full LoRA training.
  save_pretrained_model: False
  # Learning rate for AdamW
  learning_rate: 5e-4
  # Weight decay. Higher = more regularization. Lower = closer to dataset.
  adam_weight_decay: 1e-2
  # Maximum number of training steps. The model is saved after training.
  max_train_steps: 300
  # Saves a model every nth step.
  checkpointing_steps: 50
  # Run validation every nth step if sample_preview is enabled.
  validation_steps: 50
  # Whether or not we want to use mixed precision with accelerate
  mixed_precision: "fp16"
  # Trades speed for VRAM: training is roughly 20% slower, but saves a lot of VRAM.
  # If you need to save more VRAM, it can also be enabled for the image encoder (below), at a further cost in speed.
  gradient_checkpointing: True
  image_encoder_gradient_checkpointing: True

  train_data:
    # The width and height to which your training data will be resized.
    width: 896
    height: 512
    # The closest aspect ratio to the requested width and height is used.
    # For example, with width and height 512x512, a 1280x720 video will be resized to 512x256.
    use_data_aug: ~ #"controlnet"
    pad_to_fit: false

  validation_data:
    # Whether or not to sample a preview during training (requires more VRAM).
    sample_preview: True
    # The number of frames to sample during validation.
    num_frames: 14
    # Height and width of validation sample.
    width: 1024
    height: 576
    pad_to_fit: false
    # Scale of the spatial LoRAs (default: 0).
    spatial_scale: 0
    # Scale of the noise prior, i.e. the scale of the inversion noise.
    noise_prior:
      #- 0.0 
      - 1.0

sarp_params:
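  # Scale of the noise added by SARP.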
  sarp_noise_scale: 0.005

attention_matching_params:
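  # Which motion-LoRA checkpoint to load (in training steps; 50 matches checkpointing_steps above)
  # and its strength at inference.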
  best_checkpoint_index: 50
  lora_scale: 1.0
  # Directory containing the trained motion LoRA checkpoints.
  lora_dir: "./cache/item4/train_motion_lora"
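  # Maximum classifier-free guidance scale; SVD ramps guidance linearly from a minimum to this value across frames.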
  max_guidance_scale: 1.5

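  # Store attention maps on disk and, if set, reload a precomputed attention store from this path.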
  disk_store: True
  load_attention_store: "./cache/item4/attention_store/"
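  # Attention modules hooked for attention control; in diffusers transformer blocks,
  # attn1 is self-attention and attn2 is cross-attention.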
  registered_modules:
    BasicTransformerBlock:
      - "attn1"
      #- "attn2"
    TemporalBasicTransformerBlock:
      - "attn1"
      #- "attn2"
  control_mode: 
    spatial_self: "masked_copy"
    temporal_self: "copy_v2"
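  # Fraction of the denoising steps over which each attention type is replaced
  # (prompt-to-prompt-style schedule, assumed).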
  cross_replace_steps: 0.0
  temporal_self_replace_steps: 1.0
  spatial_self_replace_steps: 1.0
  spatial_attention_chunk_size: 1

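  # Per-edit thresholds; one entry per keyframe in data_params.keyframe_paths.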
  params:
    edit0:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]
    edit1:
      temporal_step_thr: [0.5, 0.8]
      mask_thr: [0.35, 0.35]

long_video_params:
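  # Cross-chunk consistency strategy for long videos.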
  mode: "skip-interval"
  registered_modules:
    BasicTransformerBlock:
      #- "attn1"
      #- "attn2"
    TemporalBasicTransformerBlock:
      - "attn1"
      #- "attn2"