Spaces: Running on Zero
Upload 6 files
- configs/ltxv-13b-0.9.7-dev.yaml +34 -0
- configs/ltxv-13b-0.9.7-distilled.yaml +28 -0
- configs/ltxv-2b-0.9.5.yaml +17 -0
- configs/ltxv-2b-0.9.6-dev.yaml +17 -0
- configs/ltxv-2b-0.9.6-distilled.yaml +16 -0
- configs/ltxv-2b-0.9.yaml +17 -0
configs/ltxv-13b-0.9.7-dev.yaml
ADDED
@@ -0,0 +1,34 @@
+pipeline_type: multi-scale
+checkpoint_path: "ltxv-13b-0.9.7-dev.safetensors"
+downscale_factor: 0.6666666
+spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors"
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: false
+
+first_pass:
+  guidance_scale: [1, 1, 6, 8, 6, 1, 1]
+  stg_scale: [0, 0, 4, 4, 4, 2, 1]
+  rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
+  guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
+  skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
+  num_inference_steps: 30
+  skip_final_inference_steps: 3
+  cfg_star_rescale: true
+
+second_pass:
+  guidance_scale: [1]
+  stg_scale: [1]
+  rescaling_scale: [1]
+  guidance_timesteps: [1.0]
+  skip_block_list: [27]
+  num_inference_steps: 30
+  skip_initial_inference_steps: 17
+  cfg_star_rescale: true
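For reference, these configs are plain YAML and can be loaded and sanity-checked before being handed to an inference script. A minimal sketch (the local file path and the length check are illustrative assumptions, not part of this upload):

# Minimal sketch: load the multi-scale dev config and check that the
# per-step schedules in first_pass line up with guidance_timesteps.
# Assumes the file above is available locally and PyYAML is installed.
import yaml

with open("configs/ltxv-13b-0.9.7-dev.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["pipeline_type"])  # multi-scale
first = cfg["first_pass"]

# guidance_scale, stg_scale, rescaling_scale and skip_block_list are given
# per guidance timestep, so their lengths should match guidance_timesteps.
n = len(first["guidance_timesteps"])
for key in ("guidance_scale", "stg_scale", "rescaling_scale", "skip_block_list"):
    assert len(first[key]) == n, f"{key} should have {n} entries"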
configs/ltxv-13b-0.9.7-distilled.yaml
ADDED
@@ -0,0 +1,28 @@
+pipeline_type: multi-scale
+checkpoint_path: "ltxv-13b-0.9.7-distilled.safetensors"
+downscale_factor: 0.6666666
+spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors"
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: false
+
+first_pass:
+  timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
+  guidance_scale: 1
+  stg_scale: 0
+  rescaling_scale: 1
+  skip_block_list: [42]
+
+second_pass:
+  timesteps: [0.9094, 0.7250, 0.4219]
+  guidance_scale: 1
+  stg_scale: 0
+  rescaling_scale: 1
+  skip_block_list: [42]
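Unlike the dev config above, the distilled config pins an explicit timesteps grid instead of a num_inference_steps count, so the number of denoising steps per pass is simply the length of each list. A hedged check (local path assumed):

# Sketch: step counts follow directly from the explicit timesteps lists.
import yaml

with open("configs/ltxv-13b-0.9.7-distilled.yaml") as f:
    cfg = yaml.safe_load(f)

print(len(cfg["first_pass"]["timesteps"]))   # 7 first-pass steps
print(len(cfg["second_pass"]["timesteps"]))  # 3 second-pass steps, presumably after spatial upscaling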
configs/ltxv-2b-0.9.5.yaml
ADDED
@@ -0,0 +1,17 @@
+pipeline_type: base
+checkpoint_path: "ltx-video-2b-v0.9.5.safetensors"
+guidance_scale: 3
+stg_scale: 1
+rescaling_scale: 0.7
+skip_block_list: [19]
+num_inference_steps: 40
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: false
configs/ltxv-2b-0.9.6-dev.yaml
ADDED
@@ -0,0 +1,17 @@
+pipeline_type: base
+checkpoint_path: "ltxv-2b-0.9.6-dev-04-25.safetensors"
+guidance_scale: 3
+stg_scale: 1
+rescaling_scale: 0.7
+skip_block_list: [19]
+num_inference_steps: 40
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: false
configs/ltxv-2b-0.9.6-distilled.yaml
ADDED
@@ -0,0 +1,16 @@
+pipeline_type: base
+checkpoint_path: "ltxv-2b-0.9.6-distilled-04-25.safetensors"
+guidance_scale: 1
+stg_scale: 0
+rescaling_scale: 1
+num_inference_steps: 8
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: true
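The 0.9.6 distilled config differs from its dev counterpart mainly in disabled guidance/STG (guidance_scale 1, stg_scale 0), a much shorter schedule (8 steps instead of 40), and stochastic_sampling: true. A quick hedged way to list the exact differences (paths assumed local):

# Sketch: compare the 0.9.6 dev and distilled configs key by key.
import yaml

with open("configs/ltxv-2b-0.9.6-dev.yaml") as f:
    dev = yaml.safe_load(f)
with open("configs/ltxv-2b-0.9.6-distilled.yaml") as f:
    distilled = yaml.safe_load(f)

# Print every top-level key whose value changed between the two files.
for key in sorted(dev.keys() | distilled.keys()):
    if dev.get(key) != distilled.get(key):
        print(f"{key}: {dev.get(key)!r} -> {distilled.get(key)!r}")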
configs/ltxv-2b-0.9.yaml
ADDED
@@ -0,0 +1,17 @@
+pipeline_type: base
+checkpoint_path: "ltx-video-2b-v0.9.safetensors"
+guidance_scale: 3
+stg_scale: 1
+rescaling_scale: 0.7
+skip_block_list: [19]
+num_inference_steps: 40
+stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
+decode_timestep: 0.05
+decode_noise_scale: 0.025
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
+precision: "bfloat16"
+sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
+prompt_enhancement_words_threshold: 120
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
+prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
+stochastic_sampling: false
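The base configs keep the sampling settings at the top level, while the multi-scale ones nest them under first_pass/second_pass. A short loop can still summarize all six uploads; a sketch, assuming the files sit under configs/ in the working directory:

# Sketch: summarize pipeline type, checkpoint and step count for each config.
import glob
import yaml

for path in sorted(glob.glob("configs/*.yaml")):
    with open(path) as f:
        cfg = yaml.safe_load(f)
    first = cfg.get("first_pass", {})
    # Base configs set num_inference_steps at the top level; the 13b dev config
    # sets it per pass; the 13b distilled config uses explicit timesteps lists.
    steps = (cfg.get("num_inference_steps")
             or first.get("num_inference_steps")
             or len(first.get("timesteps", [])))
    print(f"{path}: {cfg['pipeline_type']}, {cfg['checkpoint_path']}, {steps} steps")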