Vicky0522 committed on
Commit 4002bde
1 Parent(s): 07889b9

Delete item2/source_and_edits/config_multi_chunks.yaml

item2/source_and_edits/config_multi_chunks.yaml DELETED
@@ -1,135 +0,0 @@
- # Pretrained diffusers model path.
- pretrained_model_path: "ckpts/stable-video-diffusion-img2vid"
- # The folder where your training outputs will be placed.
- output_dir: "./outputs"
- seed: 23
- num_steps: 25
- # Xformers must be installed for best memory savings and performance (< PyTorch 2.0)
- enable_xformers_memory_efficient_attention: True
- # Use scaled dot product attention (only available with PyTorch >= 2.0)
- enable_torch_2_attn: True
-
- use_sarp: true
-
- use_motion_lora: true
- train_motion_lora_only: false
- retrain_motion_lora: false
-
- use_inversed_latents: true
- use_attention_matching: true
- use_consistency_attention_control: false
- dtype: fp16
-
- save_last_frames: True
-
- # data_params
- data_params:
-   video_path: "../datasets/svdedit/item2/sora.mp4"
-   keyframe_paths:
-     - "../datasets/svdedit/item2/sora_kimono.png"
-   start_t: 0
-   end_t: 4
-   sample_fps: 7
-   chunk_size: 16
-   overlay_size: 1
-   normalize: true
-   output_fps: 7
-   save_sampled_frame: true
-   output_res: [576, 1024]
-   pad_to_fit: True
-   begin_clip_id: 0
-   end_clip_id: 2
-
- train_motion_lora_params:
-   cache_latents: true
-   cached_latent_dir: null #/path/to/cached_latents
-   lora_rank: 32
-   # Use LoRA for the UNET model.
-   use_unet_lora: True
-   # LoRA dropout. This parameter sets the probability of randomly zeroing out elements. Helps prevent overfitting.
-   # See: https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html
-   lora_unet_dropout: 0.1
-   # The only time you want this off is if you're doing full LoRA training.
-   save_pretrained_model: False
-   # Learning rate for AdamW
-   learning_rate: 5e-4
-   # Weight decay. Higher = more regularization. Lower = closer to dataset.
-   adam_weight_decay: 1e-2
-   # Maximum number of train steps. Model is saved after training.
-   max_train_steps: 300
-   # Saves a model every nth step.
-   checkpointing_steps: 50
-   # How many steps to do for validation if sample_preview is enabled.
-   validation_steps: 50
-   # Whether or not we want to use mixed precision with accelerate
-   mixed_precision: "fp16"
-   # Trades VRAM usage for speed. You lose roughly 20% of training speed, but save a lot of VRAM.
-   # If you need to save more VRAM, it can also be enabled for the text encoder, but this reduces speed by 2x.
-   gradient_checkpointing: True
-   image_encoder_gradient_checkpointing: True
-
-   train_data:
-     # The width and height to which your training data will be resized.
-     width: 896
-     height: 512
-     # This will find the closest aspect ratio to your input width and height.
-     # For example, a 512x512 width and height with a video of resolution 1280x720 will be resized to 512x256.
-     use_data_aug: ~ #"controlnet"
-     pad_to_fit: True
-
-   validation_data:
-     # Whether or not to sample a preview during training (requires more VRAM).
-     sample_preview: True
-     # The number of frames to sample during validation.
-     num_frames: 14
-     # Height and width of the validation sample.
-     width: 1024
-     height: 576
-     pad_to_fit: True
-     # Scale of spatial LoRAs; default is 0.
-     spatial_scale: 0
-     # Scale of the noise prior, i.e. the scale of inversion noises.
-     noise_prior:
-       #- 0.0
-       - 1.0
-
- sarp_params:
-   sarp_noise_scale: 0.005
-
- attention_matching_params:
-   best_checkpoint_index: 250
-   lora_scale: 1.0
-   # LoRA path
-   lora_dir: "./i2vedit_2024-05-19T01-39-37/train_motion_lora"
-   max_guidance_scale: 2.0
-
-   disk_store: True
-   load_attention_store: "./cache/item2/attention_store"
-   registered_modules:
-     BasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-     TemporalBasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-   control_mode:
-     spatial_self: "masked_copy"
-     temporal_self: "copy_v2"
-   cross_replace_steps: 0.0
-   temporal_self_replace_steps: 1.0
-   temporal_step_thr: [0.5, 0.8]
-   spatial_self_replace_steps: 1.0
-   mask_thr: [0.35, 0.35]
-   spatial_attention_chunk_size: 1
-
- long_video_params:
-   mode: "skip-interval"
-   registered_modules:
-     BasicTransformerBlock:
-       #- "attn1"
-       #- "attn2"
-     TemporalBasicTransformerBlock:
-       - "attn1"
-       #- "attn2"
-
-
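For reference, the deleted file is a standard nested YAML config. Below is a minimal sketch of reading it, assuming OmegaConf-style loading (common in diffusers fine-tuning scripts); whether this repo consumes the file exactly this way is an assumption, and everything beyond the key names taken from the file above is illustrative.

from omegaconf import OmegaConf

# Load the (now-deleted) multi-chunk editing config; path as in this commit.
config = OmegaConf.load("item2/source_and_edits/config_multi_chunks.yaml")

# Top-level switches that gate the pipeline stages.
print(config.use_sarp, config.use_motion_lora, config.use_attention_matching)

# Nested groups resolve with attribute access once indentation is intact.
print(config.data_params.chunk_size)    # 16 frames per chunk
print(config.data_params.overlay_size)  # chunks overlap by 1 frame
print(config.attention_matching_params.control_mode.temporal_self)  # "copy_v2"

The chunk_size/overlay_size pair is presumably what makes this the "multi_chunks" variant: a clip from start_t: 0 to end_t: 4 at sample_fps: 7 (roughly 28 frames) is processed in 16-frame chunks with one frame of overlap, consistent with begin_clip_id: 0 and end_clip_id: 2.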