Vicky0522 committed
Commit 815640c
1 Parent(s): f214734

Upload item8/config_single_chunk.yaml with huggingface_hub

Files changed (1)
  1. item8/config_single_chunk.yaml +157 -0
item8/config_single_chunk.yaml ADDED
@@ -0,0 +1,157 @@
+ # Pretrained diffusers model path.
+ pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
+ # The folder where your training outputs will be placed.
+ output_dir: "./outputs"
+ seed: 23
+ num_steps: 25
+ # xFormers must be installed for the best memory savings and performance (PyTorch < 2.0).
+ enable_xformers_memory_efficient_attention: True
+ # Use scaled dot-product attention (only available with PyTorch >= 2.0).
+ enable_torch_2_attn: True
+
+ use_sarp: true
+
+ use_motion_lora: true
+ train_motion_lora_only: false
+ retrain_motion_lora: false
+
+ use_inversed_latents: true
+ use_attention_matching: true
+ use_consistency_attention_control: false
+ dtype: fp16
+
+ visualize_attention_store: false
+ visualize_attention_store_steps: [0, 5, 10, 15, 20, 24]
+
+ save_last_frames: True
+
+ # data_params
+ data_params:
+   video_path: "../datasets/svdedit/item8/source.mp4"
+   keyframe_paths:
+     - "../datasets/svdedit/item8/robot.png"
+     - "../datasets/svdedit/item8/helmet.jpg"
+     - "../datasets/svdedit/item8/jackma.png"
+     - "../datasets/svdedit/item8/party_hat.png"
+     - "../datasets/svdedit/item8/santa_hat.webp"
+     - "../datasets/svdedit/item8/star.png"
+   start_t: 0
+   end_t: -1
+   sample_fps: 8
+   chunk_size: 16
+   overlay_size: 1
+   normalize: true
+   output_fps: 8
+   save_sampled_frame: true
+   output_res: [576, 1024]
+   pad_to_fit: true
+
+ train_motion_lora_params:
+   cache_latents: true
+   cached_latent_dir: null # /path/to/cached_latents
+   lora_rank: 32
+   # Use LoRA for the UNet model.
+   use_unet_lora: True
+   # LoRA dropout: the probability of randomly zeroing out elements. Helps prevent overfitting.
+   # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
+   lora_unet_dropout: 0.1
+   # The only time you want this off is if you're doing full LoRA training.
+   save_pretrained_model: False
+   # Learning rate for AdamW.
+   learning_rate: 5e-4
+   # Weight decay. Higher = more regularization; lower = closer to the dataset.
+   adam_weight_decay: 1e-2
+   # Maximum number of train steps. The model is saved after training.
+   max_train_steps: 300
+   # Saves a model every nth step.
+   checkpointing_steps: 50
+   # How many steps between validations if sample_preview is enabled.
+   validation_steps: 50
+   # Whether or not to use mixed precision with accelerate.
+   mixed_precision: "fp16"
+   # Trades VRAM usage for speed: you lose roughly 20% of training speed but save a lot of VRAM.
+   # If you need to save more VRAM, it can also be enabled for the image encoder, but this reduces speed by about 2x.
+   gradient_checkpointing: True
+   image_encoder_gradient_checkpointing: True
+
+   train_data:
+     # The width and height your training data will be resized to.
+     width: 896
+     height: 512
+     # This finds the closest aspect ratio to your input width and height.
+     # For example, a 512x512 target with a 1280x720 video resizes to 512x256.
+     use_data_aug: ~ # "controlnet"
+     pad_to_fit: true
+
+   validation_data:
+     # Whether or not to sample a preview during training (requires more VRAM).
+     sample_preview: True
+     # The number of frames to sample during validation.
+     num_frames: 14
+     # Height and width of the validation sample.
+     width: 1024
+     height: 576
+     pad_to_fit: true
+     # Scale of the spatial LoRAs; default is 0.
+     spatial_scale: 0
+     # Scale of the noise prior, i.e. the scale of the inversion noises.
+     noise_prior:
+       #- 0.0
+       - 1.0
+
+ sarp_params:
+   sarp_noise_scale: 0.005
+
+ attention_matching_params:
+   best_checkpoint_index: 250
+   lora_scale: 1.0
+   # LoRA path.
+   lora_dir: "./cache/item8/train_motion_lora/"
+   max_guidance_scale: 2.0
+   disk_store: True
+   load_attention_store: "./cache/item8/attention_store/"
+   registered_modules:
+     BasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+     TemporalBasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+   control_mode:
+     spatial_self: "masked_copy"
+     temporal_self: "copy_v2"
+   cross_replace_steps: 0.0
+   temporal_self_replace_steps: 1.0
+   spatial_self_replace_steps: 1.0
+   spatial_attention_chunk_size: 1
+
+   params:
+     edit0:
+       temporal_step_thr: [0.4, 0.5]
+       mask_thr: [0.35, 0.35]
+     edit1:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+     edit2:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+     edit3:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+     edit4:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+     edit5:
+       temporal_step_thr: [0.5, 0.8]
+       mask_thr: [0.35, 0.35]
+
+ long_video_params:
+   mode: "skip-interval"
+   registered_modules:
+     BasicTransformerBlock:
+       #- "attn1"
+       #- "attn2"
+     TemporalBasicTransformerBlock:
+       - "attn1"
+       #- "attn2"
+
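This commit only uploads the config; the loader that consumes it is not shown here. As a minimal sketch of how such a file could be read and how `chunk_size`/`overlay_size` might describe overlapping frame windows, assuming PyYAML is installed and that the `chunk_frames` helper below is hypothetical (the actual I2VEdit code may differ):

```python
# Hypothetical sketch: load the uploaded config and illustrate one plausible
# reading of chunk_size/overlay_size as overlapping frame windows.
import yaml

with open("item8/config_single_chunk.yaml") as f:
    cfg = yaml.safe_load(f)

data = cfg["data_params"]
print(data["video_path"], len(data["keyframe_paths"]))  # source video, 6 keyframes


def chunk_frames(num_frames: int, chunk_size: int, overlay_size: int) -> list[list[int]]:
    """Split frame indices into windows of `chunk_size` overlapping by `overlay_size`."""
    stride = chunk_size - overlay_size
    chunks, start = [], 0
    while start < num_frames:
        chunks.append(list(range(start, min(start + chunk_size, num_frames))))
        if start + chunk_size >= num_frames:
            break
        start += stride
    return chunks


# With chunk_size=16 and overlay_size=1, a 31-frame clip yields two windows
# that share a single frame: [0..15] and [15..30].
print(chunk_frames(31, data["chunk_size"], data["overlay_size"]))
```

Note that the six `edit0`–`edit5` entries under `attention_matching_params.params` line up one-to-one with the six `keyframe_paths`, so each keyframe edit gets its own temporal-step and mask thresholds.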