diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..de1fa5da150b197201e8e69c242a4f180ba8f5e2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \
+ --data_file_keys "image,kontext_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Kontext-dev_full" \
+ --trainable_models "dit" \
+ --extra_inputs "kontext_images" \
+ --use_gradient_checkpointing
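
Note: in the Kontext recipe the training pairs live in an extra kontext_images column of metadata_kontext.csv. Judging by how these flags are used across the scripts in this patch, --data_file_keys tells the data loader which metadata columns to decode as image files, while --extra_inputs forwards the same field to the pipeline at each training step. A quick way to inspect the expected columns (the metadata file is referenced by the script above and assumed to ship with the example dataset):

    head -n 2 data/example_image_dataset/metadata_kontext.csv
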
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Krea-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Krea-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..76727a8590176a693f6bd2d474a3b56f4c5e3d4d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Krea-dev.sh
@@ -0,0 +1,12 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Krea-dev:flux1-krea-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Krea-dev_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh
new file mode 100644
index 0000000000000000000000000000000000000000..91dc0cfe66924e9eae46fc581a1115971f49d7e4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_attrictrl.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/AttriCtrl-FLUX.1-Dev:models/brightness.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.value_controller.encoders.0." \
+ --output_path "./models/train/FLUX.1-dev-AttriCtrl_full" \
+ --trainable_models "value_controller" \
+ --extra_inputs "value_controller_inputs" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1ef6a407cf6d6e15540003ba792caf07c8111387
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_inpaint.csv \
+ --data_file_keys "image,controlnet_image,controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image,controlnet_inpaint_mask" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f905bca7a109d757b8f7b59a1080e3151e7adb2c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_canny.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-Controlnet-Union-alpha:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Union-alpha_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image,controlnet_processor_id" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e2dd5d8df19d5f5c27ddc04f632bcb22c7571afc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_upscale.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,jasperai/Flux.1-dev-Controlnet-Upscaler:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Upscaler_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh
new file mode 100644
index 0000000000000000000000000000000000000000..43bc0062f4a6f0fc61dbf9607f30af0e2ec4b3b3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_ipadapter.csv \
+ --data_file_keys "image,ipadapter_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-IP-Adapter:ip-adapter.bin,google/siglip-so400m-patch14-384:" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.ipadapter." \
+ --output_path "./models/train/FLUX.1-dev-IP-Adapter_full" \
+ --trainable_models "ipadapter" \
+ --extra_inputs "ipadapter_images" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6040fa58a7f322c7fd21a77834300f039aa03e15
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_infiniteyou.csv \
+ --data_file_keys "image,controlnet_image,infinityou_id_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/image_proj_model.bin,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe." \
+ --output_path "./models/train/FLUX.1-dev-InfiniteYou_full" \
+ --trainable_models "controlnet,image_proj_model" \
+ --extra_inputs "controlnet_image,infinityou_id_image,infinityou_guidance" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f0d4f97cc1f6da5a668f852466cc19fdeae3107c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_lora_encoder.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/LoRA-Encoder-FLUX.1-Dev:model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.lora_encoder." \
+ --output_path "./models/train/FLUX.1-dev-LoRA-Encoder_full" \
+ --trainable_models "lora_encoder" \
+ --extra_inputs "lora_encoder_inputs" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9254957119bc97bde91f63b21261a250c37f75ef
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev.sh
@@ -0,0 +1,12 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Nexus-Gen.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Nexus-Gen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ab1c32481c5f7aa6dbbd2a67f602e5b8aa6c92d4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Nexus-Gen.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config_zero2offload.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_nexusgen_edit.csv \
+ --data_file_keys "image,nexus_gen_reference_image" \
+ --max_pixels 262144 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Nexus-GenV2:model*.safetensors,DiffSynth-Studio/Nexus-GenV2:edit_decoder.bin,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-NexusGen-Edit_full" \
+ --trainable_models "dit" \
+ --extra_inputs "nexus_gen_reference_image" \
+ --use_gradient_checkpointing_offload
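
Note: unlike the other full-finetune recipes above, Nexus-Gen launches with the ZeRO-2 CPU-offload config, lowers --max_pixels, and switches to --use_gradient_checkpointing_offload, trading throughput for memory headroom. The pixel budgets are simple squares; reading them as square resolutions is an interpretation, not something the scripts state:

    echo $((1024 * 1024))   # 1048576 -- the --max_pixels used by most recipes in this patch
    echo $((512 * 512))     # 262144  -- the reduced budget used here for Nexus-Gen edit training
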
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Step1X-Edit.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Step1X-Edit.sh
new file mode 100644
index 0000000000000000000000000000000000000000..98c45cee9e0fb1af50b16e0c92724ff45aed08df
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Step1X-Edit.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_step1x.csv \
+ --data_file_keys "image,step1x_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "Qwen/Qwen2.5-VL-7B-Instruct:,stepfun-ai/Step1X-Edit:step1x-edit-i1258.safetensors,stepfun-ai/Step1X-Edit:vae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Step1X-Edit_full" \
+ --trainable_models "dit" \
+ --extra_inputs "step1x_reference_image" \
+ --use_gradient_checkpointing_offload
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config.yaml b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83280f73f315a32eccb065f351d66b4b2678759d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config.yaml
@@ -0,0 +1,22 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ offload_optimizer_device: none
+ offload_param_device: none
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
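
Note: this is the DeepSpeed config the full-finetune recipes reference via --config_file: ZeRO stage 2, bf16 mixed precision, 8 local processes, no offload. On a machine with a different GPU count, the process count can be overridden at launch time instead of editing the YAML, since accelerate's command-line flags generally take precedence over the config file. A minimal sketch (the 4-GPU value is an assumption; the training arguments are copied from full/FLUX.1-dev.sh above):

    accelerate launch \
      --config_file examples/flux/model_training/full/accelerate_config.yaml \
      --num_processes 4 \
      examples/flux/model_training/train.py \
      --dataset_base_path data/example_image_dataset \
      --dataset_metadata_path data/example_image_dataset/metadata.csv \
      --max_pixels 1048576 \
      --dataset_repeat 400 \
      --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
      --learning_rate 1e-5 \
      --num_epochs 1 \
      --remove_prefix_in_ckpt "pipe.dit." \
      --output_path "./models/train/FLUX.1-dev_full" \
      --trainable_models "dit" \
      --use_gradient_checkpointing
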
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config_zero2offload.yaml b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config_zero2offload.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a75f3d91eeae160409650b482e5383ac26b297b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config_zero2offload.yaml
@@ -0,0 +1,22 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ offload_optimizer_device: 'cpu'
+ offload_param_device: 'cpu'
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
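
Note: the only difference from accelerate_config.yaml above is that optimizer state and parameters are offloaded to the CPU (offload_optimizer_device / offload_param_device set to 'cpu'); among the scripts in this patch, only full/Nexus-Gen.sh points at this variant. If one of the other full-finetune runs is memory-bound, swapping its --config_file to this file is one way to relieve it. A hypothetical one-liner, not part of the original scripts:

    sed 's#full/accelerate_config.yaml#full/accelerate_config_zero2offload.yaml#' \
        examples/flux/model_training/full/FLUX.1-Krea-dev.sh | bash
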
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLEX.2-preview.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLEX.2-preview.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6abeb57cffe0a1cb41688069cd75c2a094b31a6c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLEX.2-preview.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "ostris/Flex.2-preview:Flex.2-preview.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLEX.2-preview_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
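
Note: the LoRA recipes here and below share one shape: a higher learning rate (1e-4 versus 1e-5 for full finetuning), 5 epochs, rank-32 adapters on the listed DiT attention, feed-forward, and norm-modulation projections, --align_to_opensource_format for the exported weights, and the default accelerate launcher instead of the DeepSpeed config. This FLEX.2-preview recipe pairs with validate_lora/FLEX.2-preview.py added further down, which attaches the final epoch-4 checkpoint via pipe.load_lora:

    bash examples/flux/model_training/lora/FLEX.2-preview.sh
    python examples/flux/model_training/validate_lora/FLEX.2-preview.py   # loads models/train/FLEX.2-preview_lora/epoch-4.safetensors
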
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..814d7ad06972ac4ac9b3c132fb4f71dc324d50b8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \
+ --data_file_keys "image,kontext_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Kontext-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "kontext_images" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1d54d434110f376c9553f452b5d5e08c3dcb950e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Krea-dev:flux1-krea-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Krea-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7763c5f56cc8fb587d28493fa89296391954bd1d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_attrictrl.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/AttriCtrl-FLUX.1-Dev:models/brightness.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-AttriCtrl_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "value_controller_inputs" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0de6a3067569d092e7c7a5d527319f7c13f5d26f
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_inpaint.csv \
+ --data_file_keys "image,controlnet_image,controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,controlnet_inpaint_mask" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5a65af857e67f53b6512d5c0ae495cfb7e57c3da
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_canny.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-Controlnet-Union-alpha:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Union-alpha_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,controlnet_processor_id" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh
new file mode 100644
index 0000000000000000000000000000000000000000..72e89714ba8fdf716df16c19ca9a06f2d818e876
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_upscale.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,jasperai/Flux.1-dev-Controlnet-Upscaler:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Upscaler_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..10a18e0376458b8a3392626df510a500216983b3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_eligen.json \
+ --data_file_keys "image,eligen_entity_masks" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-EliGen_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "eligen_entity_masks,eligen_entity_prompts" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0495c11157b72c63ce42b81d84b0e6be2ded50ad
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_ipadapter.csv \
+ --data_file_keys "image,ipadapter_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-IP-Adapter:ip-adapter.bin,google/siglip-so400m-patch14-384:" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-IP-Adapter_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "ipadapter_images" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b51a5659ea5affadca5e6973534cd816dbcb9ebd
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_infiniteyou.csv \
+ --data_file_keys "image,controlnet_image,infinityou_id_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/image_proj_model.bin,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-InfiniteYou_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,infinityou_id_image,infinityou_guidance" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4b207ef855d0c94c9ccde595620deb544124a43e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Nexus-Gen.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Nexus-Gen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3e6eac1eb7143ca1bbd8fa9996a22cc0e3ded6dc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Nexus-Gen.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_nexusgen_edit.csv \
+ --data_file_keys "image,nexus_gen_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Nexus-GenV2:model*.safetensors,DiffSynth-Studio/Nexus-GenV2:edit_decoder.bin,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-NexusGen-Edit_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "nexus_gen_reference_image" \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Step1X-Edit.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Step1X-Edit.sh
new file mode 100644
index 0000000000000000000000000000000000000000..01ac260419685567693e12c8c8c55fd9667f3532
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Step1X-Edit.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_step1x.csv \
+ --data_file_keys "image,step1x_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen2.5-VL-7B-Instruct:,stepfun-ai/Step1X-Edit:step1x-edit-i1258.safetensors,stepfun-ai/Step1X-Edit:vae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Step1X-Edit_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "step1x_reference_image" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLEX.2-preview.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLEX.2-preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..78f64b606a7447dba5317f07e2ce8f9ea9af0bbb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLEX.2-preview.py
@@ -0,0 +1,20 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLEX.2-preview_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+image = pipe(prompt="dog,white and brown dog, sitting on wall, under pink flowers", seed=0)
+image.save("image_FLEX.2-preview_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..af3ee3619d36cf48598cf463bbd5c0866b1b381c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py
@@ -0,0 +1,26 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-Kontext-dev", origin_file_pattern="flux1-kontext-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-Kontext-dev_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+image = pipe(
+ prompt="Make the dog turn its head around.",
+ kontext_images=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+ height=768, width=768,
+ seed=0
+)
+image.save("image_FLUX.1-Kontext-dev_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..bceb4644d20db6a182793f982f52f511e82d7206
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py
@@ -0,0 +1,20 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-Krea-dev", origin_file_pattern="flux1-krea-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-Krea-dev_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+image = pipe(prompt="a dog", seed=0)
+image.save("image_FLUX.1-Krea-dev_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py
new file mode 100644
index 0000000000000000000000000000000000000000..17384fce08fa4fad90353ff5b8e8244eb8a23800
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py
@@ -0,0 +1,21 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/AttriCtrl-FLUX.1-Dev", origin_file_pattern="models/brightness.safetensors")
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-AttriCtrl_full/epoch-0.safetensors")
+pipe.value_controller.encoders[0].load_state_dict(state_dict)
+
+image = pipe(prompt="a cat", seed=0, value_controller_inputs=0.1, rand_device="cuda")
+image.save("image_FLUX.1-dev-AttriCtrl_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d35f7e33fea04b4aefdc3ee55572b7b2bd35fe3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py
@@ -0,0 +1,31 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_full/epoch-0.safetensors")
+pipe.controlnet.models[0].load_state_dict(state_dict)
+
+image = pipe(
+ prompt="a cat sitting on a chair, wearing sunglasses",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/inpaint/image_1.jpg"),
+ inpaint_mask=Image.open("data/example_image_dataset/inpaint/mask.jpg"),
+ scale=0.9
+ )],
+ height=1024, width=1024,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Inpainting-Beta_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceaadd8d114219a33e764a59f65f4d66c41104bb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py
@@ -0,0 +1,31 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="InstantX/FLUX.1-dev-Controlnet-Union-alpha", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Union-alpha_full/epoch-0.safetensors")
+pipe.controlnet.models[0].load_state_dict(state_dict)
+
+image = pipe(
+ prompt="a dog",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/canny/image_1.jpg"),
+ scale=0.9,
+ processor_id="canny",
+ )],
+ height=768, width=768,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Union-alpha_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ff8319e373ccf5e61026f7308d09b0d14987054
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py
@@ -0,0 +1,30 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="jasperai/Flux.1-dev-Controlnet-Upscaler", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Upscaler_full/epoch-0.safetensors")
+pipe.controlnet.models[0].load_state_dict(state_dict)
+
+image = pipe(
+ prompt="a dog",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/upscale/image_1.jpg"),
+ scale=0.9
+ )],
+ height=768, width=768,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Upscaler_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6bab3d8e8fd3be30814abc181ff211faab05057
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py
@@ -0,0 +1,28 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="InstantX/FLUX.1-dev-IP-Adapter", origin_file_pattern="ip-adapter.bin"),
+ ModelConfig(model_id="google/siglip-so400m-patch14-384"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-IP-Adapter_full/epoch-0.safetensors")
+pipe.ipadapter.load_state_dict(state_dict)
+
+image = pipe(
+ prompt="a dog",
+ ipadapter_images=Image.open("data/example_image_dataset/1.jpg"),
+ height=768, width=768,
+ seed=0
+)
+image.save("image_FLUX.1-dev-IP-Adapter_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py
new file mode 100644
index 0000000000000000000000000000000000000000..55b7275038d25a6dbcb3d4a6ee60f7ae9c7e73b6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py
@@ -0,0 +1,33 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/image_proj_model.bin"),
+ ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev-InfiniteYou_full/epoch-0.safetensors")
+state_dict_projector = {i.replace("image_proj_model.", ""): state_dict[i] for i in state_dict if i.startswith("image_proj_model.")}
+pipe.image_proj_model.load_state_dict(state_dict_projector)
+state_dict_controlnet = {i.replace("controlnet.models.0.", ""): state_dict[i] for i in state_dict if i.startswith("controlnet.models.0.")}
+pipe.controlnet.models[0].load_state_dict(state_dict_controlnet)
+
+image = pipe(
+ prompt="a man with a red hat",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/infiniteyou/image_1.jpg"),
+ )],
+ height=1024, width=1024,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-InfiniteYou_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..166f5a4be7b231c14b2d4c3b9bcf04387b140585
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py
@@ -0,0 +1,25 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/LoRA-Encoder-FLUX.1-Dev", origin_file_pattern="model.safetensors"),
+ ],
+)
+pipe.enable_lora_magic()
+state_dict = load_state_dict("models/train/FLUX.1-dev-LoRA-Encoder_full/epoch-0.safetensors")
+pipe.lora_encoder.load_state_dict(state_dict)
+
+lora = ModelConfig(model_id="VoidOc/flux_animal_forest1", origin_file_pattern="20.safetensors")
+pipe.load_lora(pipe.dit, lora, hotload=True) # Use `pipe.clear_lora()` to drop the loaded LoRA.
+
+image = pipe(prompt="", seed=0, lora_encoder_inputs=lora)
+image.save("image_FLUX.1-dev-LoRA-Encoder_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3adf7a6435f1a4abaa40bebd4445fe66278e881
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev.py
@@ -0,0 +1,20 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-dev_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+image = pipe(prompt="a dog", seed=0)
+image.save("image_FLUX.1-dev_full.jpg")
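
Note: the validate_full scripts follow the pattern shown here: rebuild the pipeline from the same model list the training script was given, then load the epoch-0 checkpoint (the full recipes train for a single epoch) into the trained component, pipe.dit in this case. The end-to-end sequence for the base FLUX.1-dev recipe, using only files added in this patch:

    bash examples/flux/model_training/full/FLUX.1-dev.sh
    python examples/flux/model_training/validate_full/FLUX.1-dev.py   # writes image_FLUX.1-dev_full.jpg
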
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Nexus-Gen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Nexus-Gen.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f7a2d240a1baef2866ef1f7db34c47ca2850dcd
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Nexus-Gen.py
@@ -0,0 +1,28 @@
+import torch
+from PIL import Image
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="model*.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="edit_decoder.bin"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/FLUX.1-NexusGen-Edit_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+ref_image = Image.open("data/example_image_dataset/nexus_gen/image_1.png").convert("RGB")
+prompt = "Add a pair of sunglasses."
+image = pipe(
+ prompt=prompt, negative_prompt="",
+ seed=42, cfg_scale=2.0, num_inference_steps=50,
+ nexus_gen_reference_image=ref_image,
+ height=512, width=512,
+)
+image.save("NexusGen-Edit_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Step1X-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Step1X-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab184e7d2cde606615d5d69766990dbae6fd9c94
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Step1X-Edit.py
@@ -0,0 +1,25 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen2.5-VL-7B-Instruct"),
+ ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="step1x-edit-i1258.safetensors"),
+ ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="vae.safetensors"),
+ ],
+)
+state_dict = load_state_dict("models/train/Step1X-Edit_full/epoch-0.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+image = pipe(
+ prompt="Make the dog turn its head around.",
+ step1x_reference_image=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+ height=768, width=768, cfg_scale=6,
+ seed=0
+)
+image.save("image_Step1X-Edit_full.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLEX.2-preview.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLEX.2-preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ef01427ffb47560c96f620268c9b8b5a255677a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLEX.2-preview.py
@@ -0,0 +1,18 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLEX.2-preview_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(prompt="dog,white and brown dog, sitting on wall, under pink flowers", seed=0)
+image.save("image_FLEX.2-preview_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..b61cd4b6db1601220e2de32323e5a57e1e72a18c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py
@@ -0,0 +1,24 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-Kontext-dev", origin_file_pattern="flux1-kontext-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-Kontext-dev_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="Make the dog turn its head around.",
+ kontext_images=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+ height=768, width=768,
+ seed=0
+)
+image.save("image_FLUX.1-Kontext-dev_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..068ce7200892a84d00a412083fd7a14056f89d12
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py
@@ -0,0 +1,18 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-Krea-dev", origin_file_pattern="flux1-krea-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-Krea-dev_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(prompt="a dog", seed=0)
+image.save("image_FLUX.1-Krea-dev_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f44df0dff307dbaab5867db429362b713d20203c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py
@@ -0,0 +1,19 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/AttriCtrl-FLUX.1-Dev", origin_file_pattern="models/brightness.safetensors")
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-AttriCtrl_lora/epoch-3.safetensors", alpha=1)
+
+image = pipe(prompt="a cat", seed=0, value_controller_inputs=0.1, rand_device="cuda")
+image.save("image_FLUX.1-dev-AttriCtrl_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d88b8c434efb0351f10b01498da4d1ec657ec20
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py
@@ -0,0 +1,29 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="a cat sitting on a chair, wearing sunglasses",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/inpaint/image_1.jpg"),
+ inpaint_mask=Image.open("data/example_image_dataset/inpaint/mask.jpg"),
+ scale=0.9
+ )],
+ height=1024, width=1024,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Inpainting-Beta_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py
new file mode 100644
index 0000000000000000000000000000000000000000..240d8b62e0dcdbb1b0fddb177b01e2ea6950a1d6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py
@@ -0,0 +1,29 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="InstantX/FLUX.1-dev-Controlnet-Union-alpha", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Union-alpha_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="a dog",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/canny/image_1.jpg"),
+ scale=0.9,
+ processor_id="canny",
+ )],
+ height=768, width=768,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Union-alpha_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b27896c2bad50f630f665ba64e0bca036ab34bb1
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py
@@ -0,0 +1,28 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="jasperai/Flux.1-dev-Controlnet-Upscaler", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Upscaler_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="a dog",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/upscale/image_1.jpg"),
+ scale=0.9
+ )],
+ height=768, width=768,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-Controlnet-Upscaler_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py
new file mode 100644
index 0000000000000000000000000000000000000000..7df3db2a3414148d4a121790fa7e6dc74b4c3848
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-EliGen_lora/epoch-4.safetensors", alpha=1)
+
+entity_prompts = ["A beautiful girl", "sign 'Entity Control'", "shorts", "shirt"]
+global_prompt = "A beautiful girl wearing shirt and shorts in the street, holding a sign 'Entity Control'"
+masks = [Image.open(f"data/example_image_dataset/eligen/{i}.png").convert('RGB') for i in range(len(entity_prompts))]
+# generate image
+image = pipe(
+ prompt=global_prompt,
+ cfg_scale=1.0,
+ num_inference_steps=50,
+ embedded_guidance=3.5,
+ seed=42,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks,
+)
+image.save(f"EliGen_lora.png")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b085ad7193931f2679575294c02d749fc71b8ca5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py
@@ -0,0 +1,26 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="InstantX/FLUX.1-dev-IP-Adapter", origin_file_pattern="ip-adapter.bin"),
+ ModelConfig(model_id="google/siglip-so400m-patch14-384"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-IP-Adapter_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="dog,white and brown dog, sitting on wall, under pink flowers",
+ ipadapter_images=Image.open("data/example_image_dataset/1.jpg"),
+ height=768, width=768,
+ seed=0
+)
+image.save("image_FLUX.1-dev-IP-Adapter_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d9d8a2e240e9f53510f0a20ab9f178643db45a2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py
@@ -0,0 +1,28 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/image_proj_model.bin"),
+ ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-InfiniteYou_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="a man with a red hat",
+ controlnet_inputs=[ControlNetInput(
+ image=Image.open("data/example_image_dataset/infiniteyou/image_1.jpg"),
+ )],
+ height=1024, width=1024,
+ seed=0, rand_device="cuda",
+)
+image.save("image_FLUX.1-dev-InfiniteYou_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1aebef4887d2be10c3384b7c1409af139911206
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev.py
@@ -0,0 +1,18 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(prompt="a dog", seed=0)
+image.save("image_FLUX.1-dev_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Nexus-Gen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Nexus-Gen.py
new file mode 100644
index 0000000000000000000000000000000000000000..21c376fbb1e8a8996a18786f44dcdc86dfaa1ff5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Nexus-Gen.py
@@ -0,0 +1,26 @@
+import torch
+from PIL import Image
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="model*.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="edit_decoder.bin"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+ ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/FLUX.1-NexusGen-Edit_lora/epoch-4.safetensors", alpha=1)
+
+ref_image = Image.open("data/example_image_dataset/nexus_gen/image_1.png").convert("RGB")
+prompt = "Add a pair of sunglasses."
+image = pipe(
+ prompt=prompt, negative_prompt="",
+ seed=42, cfg_scale=1.0, num_inference_steps=50,
+ nexus_gen_reference_image=ref_image,
+ height=512, width=512,
+)
+image.save("NexusGen-Edit_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Step1X-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Step1X-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..886af3f686c96424bac7585860aeb82e40100fab
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/Step1X-Edit.py
@@ -0,0 +1,23 @@
+import torch
+from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+from PIL import Image
+
+
+pipe = FluxImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen2.5-VL-7B-Instruct"),
+ ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="step1x-edit-i1258.safetensors"),
+ ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="vae.safetensors"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Step1X-Edit_lora/epoch-4.safetensors", alpha=1)
+
+image = pipe(
+ prompt="Make the dog turn its head around.",
+ step1x_reference_image=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+ height=768, width=768, cfg_scale=6,
+ seed=0
+)
+image.save("image_Step1X-Edit_lora.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d5bf8ba49a89c4d216864f2a4355ff5d4df8568
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/README.md
@@ -0,0 +1,15 @@
+# Image Quality Metric
+
+The image quality assessment functionality has been integrated into DiffSynth-Studio. We support the following models:
+
+* [ImageReward](https://github.com/THUDM/ImageReward)
+* [Aesthetic](https://github.com/christophschuhmann/improved-aesthetic-predictor)
+* [PickScore](https://github.com/yuvalkirstain/pickscore)
+* [CLIP](https://github.com/openai/CLIP)
+* [HPSv2](https://github.com/tgxs002/HPSv2)
+* [HPSv2.1](https://github.com/tgxs002/HPSv2)
+* [MPS](https://github.com/Kwai-Kolors/MPS)
+
+## Usage
+
+See [`./image_quality_evaluation.py`](./image_quality_evaluation.py) for more details.
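+
+Below is a minimal scoring sketch, adapted from that script (the image path is a placeholder; any RGB image works):
+
+```python
+from diffsynth.extensions.ImageQualityMetric import download_preference_model, load_preference_model
+from PIL import Image
+
+# Download one preference model, load it, and score an image against a prompt.
+image = Image.open("image.jpg")  # placeholder path
+path = download_preference_model("ImageReward", cache_dir="./models")
+model = load_preference_model("ImageReward", device="cuda", path=path)
+print("ImageReward", model.score(image, "an orange cat"))
+```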
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/image_quality_evaluation.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/image_quality_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9911cf8d234b4e3b2402b7c7ae24a194ea330773
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_quality_metric/image_quality_evaluation.py
@@ -0,0 +1,23 @@
+from diffsynth.extensions.ImageQualityMetric import download_preference_model, load_preference_model
+from modelscope import dataset_snapshot_download
+from PIL import Image
+
+
+# Download example image
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ allow_file_pattern="data/examples/ImageQualityMetric/image.jpg",
+ local_dir="./"
+)
+
+# Parameters
+prompt = "an orange cat"
+image = Image.open("data/examples/ImageQualityMetric/image.jpg")
+device = "cuda"
+cache_dir = "./models"
+
+# Run preference models
+for model_name in ["ImageReward", "Aesthetic", "PickScore", "CLIP", "HPSv2", "HPSv2.1", "MPS"]:
+ path = download_preference_model(model_name, cache_dir=cache_dir)
+ preference_model = load_preference_model(model_name, device=device, path=path)
+ print(model_name, preference_model.score(image, prompt))
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ac133d28a46aa6a83e489580725850dc72c34b08
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/README.md
@@ -0,0 +1,95 @@
+# Image Synthesis
+
+Image synthesis is the core feature of DiffSynth-Studio. We can generate images at very high resolutions.
+
+### Example: OmniGen
+
+Example script: [`omnigen_text_to_image.py`](./omnigen_text_to_image.py)
+
+OmniGen is a text-image-to-image model: it can synthesize an image conditioned on several given reference images.
+
+|Reference image 1|Reference image 2|Synthesized image|
+|-|-|-|
+||||
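+
+Below is a minimal sketch adapted from [`omnigen_text_to_image.py`](./omnigen_text_to_image.py); the reference-image paths are placeholders. Reference subjects are bound to the prompt via the `<|image_1|>` and `<|image_2|>` tags:
+
+```python
+import torch
+from PIL import Image
+from diffsynth import ModelManager, OmnigenImagePipeline
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16, model_id_list=["OmniGen-v1"])
+pipe = OmnigenImagePipeline.from_model_manager(model_manager)
+
+# Placeholder reference images; replace them with your own portraits.
+image_man, image_woman = Image.open("man.jpg"), Image.open("woman.jpg")
+image = pipe(
+    prompt="a man and a woman. The man is the man in <|image_1|>. The woman is the woman in <|image_2|>.",
+    reference_images=[image_man, image_woman],
+    cfg_scale=2.5, image_cfg_scale=2.5, num_inference_steps=50, seed=2,
+)
+image.save("image_merged.jpg")
+```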
+
+### Example: FLUX
+
+Example scripts: [`flux_text_to_image.py`](./flux_text_to_image.py) and [`flux_text_to_image_low_vram.py`](./flux_text_to_image_low_vram.py) (low VRAM).
+
+The original version of FLUX doesn't support classifier-free guidance; however, we believe this guidance mechanism is an important feature for synthesizing beautiful images. You can enable it with the `cfg_scale` parameter, while the extra guidance scale introduced by FLUX is controlled by `embedded_guidance`.
+
+|1024*1024 (original)|1024*1024 (classifier-free guidance)|2048*2048 (highres-fix)|
+|-|-|-|
+||||
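+
+Below is a minimal sketch of the two guidance parameters, adapted from [`flux_text_to_image.py`](./flux_text_to_image.py) (the prompts are placeholders):
+
+```python
+import torch
+from diffsynth import ModelManager, FluxImagePipeline, download_models
+
+download_models(["FLUX.1-dev"])
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+    "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+    "models/FLUX/FLUX.1-dev/text_encoder_2",
+    "models/FLUX/FLUX.1-dev/ae.safetensors",
+    "models/FLUX/FLUX.1-dev/flux1-dev.safetensors",
+])
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+image = pipe(
+    prompt="a dog", negative_prompt="",  # placeholder prompts
+    cfg_scale=2.0,                       # classifier-free guidance; 1.0 disables it
+    embedded_guidance=3.5,               # the extra guidance scale built into FLUX
+    num_inference_steps=50,
+)
+image.save("image_cfg.jpg")
+```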
+
+### Example: Stable Diffusion
+
+Example script: [`sd_text_to_image.py`](./sd_text_to_image.py)
+
+LoRA Training: [`../train/stable_diffusion/`](../train/stable_diffusion/)
+
+|512*512|1024*1024|2048*2048|4096*4096|
+|-|-|-|-|
+|||||
+
+### Example: Stable Diffusion XL
+
+Example script: [`sdxl_text_to_image.py`](./sdxl_text_to_image.py)
+
+LoRA Training: [`../train/stable_diffusion_xl/`](../train/stable_diffusion_xl/)
+
+|1024*1024|2048*2048|
+|-|-|
+|||
+
+### Example: Stable Diffusion 3
+
+Example script: [`sd3_text_to_image.py`](./sd3_text_to_image.py)
+
+LoRA Training: [`../train/stable_diffusion_3/`](../train/stable_diffusion_3/)
+
+|1024*1024|2048*2048|
+|-|-|
+|||
+
+### Example: Kolors
+
+Example script: [`kolors_text_to_image.py`](./kolors_text_to_image.py)
+
+LoRA Training: [`../train/kolors/`](../train/kolors/)
+
+|1024*1024|2048*2048|
+|-|-|
+|||
+
+Kolors also supports models trained for SDXL, such as ControlNets and LoRAs. See [`kolors_with_sdxl_models.py`](./kolors_with_sdxl_models.py).
+
+LoRA: https://civitai.com/models/73305/zyd232s-ink-style
+
+|Base model|with LoRA (alpha=0.5)|with LoRA (alpha=1.0)|with LoRA (alpha=1.5)|
+|-|-|-|-|
+|||||
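+
+Below is a minimal sketch of loading an SDXL LoRA into Kolors, adapted from [`kolors_with_sdxl_models.py`](./kolors_with_sdxl_models.py); `lora_alpha` corresponds to the alpha values in the table above:
+
+```python
+import torch
+from diffsynth import ModelManager, SDXLImagePipeline, download_models
+
+download_models(["Kolors", "SDXL_lora_zyd232_ChineseInkStyle_SDXL_v1_0"])
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+                             file_path_list=[
+                                 "models/kolors/Kolors/text_encoder",
+                                 "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
+                                 "models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors",
+                             ])
+model_manager.load_lora("models/lora/zyd232_ChineseInkStyle_SDXL_v1_0.safetensors", lora_alpha=1.0)
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+```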
+
+ControlNet: https://huggingface.co/xinsir/controlnet-union-sdxl-1.0
+
+|Reference image|Depth image|with ControlNet|with ControlNet|
+|-|-|-|-|
+|||||
+
+### Example: Hunyuan-DiT
+
+Example script: [`hunyuan_dit_text_to_image.py`](./hunyuan_dit_text_to_image.py)
+
+LoRA Training: [`../train/hunyuan_dit/`](../train/hunyuan_dit/)
+
+|1024*1024|2048*2048|
+|-|-|
+|||
+
+### Example: Stable Diffusion XL Turbo
+
+Example script: [`sdxl_turbo.py`](./sdxl_turbo.py)
+
+We highly recommend using this model in the WebUI.
+
+|"black car"|"red car"|
+|-|-|
+|||
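+
+Below is a minimal sketch adapted from [`sdxl_turbo.py`](./sdxl_turbo.py): the Turbo model generates in a single step at 512*512 with guidance disabled.
+
+```python
+import torch
+from diffsynth import ModelManager, SDXLImagePipeline, download_models
+
+download_models(["StableDiffusionXL_Turbo"])
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models(["models/stable_diffusion_xl_turbo/sd_xl_turbo_1.0_fp16.safetensors"])
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+# Do not modify cfg_scale, resolution, or step count: the model is tuned for this setting.
+image = pipe(prompt="black car", cfg_scale=1, height=512, width=512, num_inference_steps=1)
+image.save("black_car.jpg")
+```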
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flex_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flex_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..3770764256b25abc75e52cf17c6924252bde239b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flex_text_to_image.py
@@ -0,0 +1,49 @@
+import torch
+from diffsynth import ModelManager, FluxImagePipeline, download_models
+from diffsynth.controlnets.processors import Annotator
+import numpy as np
+from PIL import Image
+
+
+download_models(["FLUX.1-dev"])
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ "models/ostris/Flex.2-preview/Flex.2-preview.safetensors"
+])
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+image = pipe(
+ prompt="portrait of a beautiful Asian girl, long hair, red t-shirt, sunshine, beach",
+ num_inference_steps=50, embedded_guidance=3.5,
+ seed=0
+)
+image.save("image_1.jpg")
+
+mask = np.zeros((1024, 1024, 3), dtype=np.uint8)
+mask[200:400, 400:700] = 255
+mask = Image.fromarray(mask)
+mask.save("image_mask.jpg")
+
+inpaint_image = image
+
+image = pipe(
+ prompt="portrait of a beautiful Asian girl with sunglasses, long hair, red t-shirt, sunshine, beach",
+ num_inference_steps=50, embedded_guidance=3.5,
+ flex_inpaint_image=inpaint_image, flex_inpaint_mask=mask,
+ seed=4
+)
+image.save("image_2.jpg")
+
+control_image = Annotator("canny")(image)
+control_image.save("image_control.jpg")
+
+image = pipe(
+ prompt="portrait of a beautiful Asian girl with sunglasses, long hair, yellow t-shirt, sunshine, beach",
+ num_inference_steps=50, embedded_guidance=3.5,
+ flex_control_image=control_image,
+ seed=4
+)
+image.save("image_3.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a50df39bde1694c7503a2c8bbc71071a4261049
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image.py
@@ -0,0 +1,41 @@
+import torch
+from diffsynth import ModelManager, FluxImagePipeline, download_models
+
+
+download_models(["FLUX.1-dev"])
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ "models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
+])
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+prompt = "CG, masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait. The girl's flowing silver hair shimmers with every color of the rainbow and cascades down, merging with the floating flora around her."
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+# Disable classifier-free guidance (consistent with the original implementation of FLUX.1)
+torch.manual_seed(9)
+image = pipe(
+ prompt=prompt,
+ num_inference_steps=50, embedded_guidance=3.5
+)
+image.save("image_1024.jpg")
+
+# Enable classifier-free guidance
+torch.manual_seed(9)
+image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt,
+ num_inference_steps=50, cfg_scale=2.0, embedded_guidance=3.5
+)
+image.save("image_1024_cfg.jpg")
+
+# Highres-fix
+torch.manual_seed(10)
+image = pipe(
+ prompt=prompt,
+ num_inference_steps=50, embedded_guidance=3.5,
+ input_image=image.resize((2048, 2048)), height=2048, width=2048, denoising_strength=0.6, tiled=True
+)
+image.save("image_2048_highres.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image_low_vram.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image_low_vram.py
new file mode 100644
index 0000000000000000000000000000000000000000..985f009d336e7945471d26cda2b3606905273d47
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/flux_text_to_image_low_vram.py
@@ -0,0 +1,51 @@
+import torch
+from diffsynth import download_models, ModelManager, FluxImagePipeline
+
+
+download_models(["FLUX.1-dev"])
+
+model_manager = ModelManager(
+ torch_dtype=torch.bfloat16,
+ device="cpu" # To reduce VRAM required, we load models to RAM.
+)
+model_manager.load_models([
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+])
+model_manager.load_models(
+ ["models/FLUX/FLUX.1-dev/flux1-dev.safetensors"],
+ torch_dtype=torch.float8_e4m3fn # Load the DiT model in FP8 format.
+)
+
+pipe = FluxImagePipeline.from_model_manager(model_manager, device="cuda")
+pipe.enable_cpu_offload()
+pipe.dit.quantize()
+
+prompt = "CG, masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait. The girl's flowing silver hair shimmers with every color of the rainbow and cascades down, merging with the floating flora around her."
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+# Disable classifier-free guidance (consistent with the original implementation of FLUX.1)
+torch.manual_seed(9)
+image = pipe(
+ prompt=prompt,
+ num_inference_steps=50, embedded_guidance=3.5
+)
+image.save("image_1024.jpg")
+
+# Enable classifier-free guidance
+torch.manual_seed(9)
+image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt,
+ num_inference_steps=50, cfg_scale=2.0, embedded_guidance=3.5
+)
+image.save("image_1024_cfg.jpg")
+
+# Highres-fix
+torch.manual_seed(10)
+image = pipe(
+ prompt=prompt,
+ num_inference_steps=50, embedded_guidance=3.5,
+ input_image=image.resize((2048, 2048)), height=2048, width=2048, denoising_strength=0.6, tiled=True
+)
+image.save("image_2048_highres.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/hunyuan_dit_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/hunyuan_dit_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e4df91a510a0f0c30eb9c955926f9ee5acbd520
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/hunyuan_dit_text_to_image.py
@@ -0,0 +1,42 @@
+from diffsynth import ModelManager, HunyuanDiTImagePipeline, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/clip_text_encoder/pytorch_model.bin)
+# `models/HunyuanDiT/t2i/mt5/pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/mt5/pytorch_model.bin)
+# `models/HunyuanDiT/t2i/model/pytorch_model_ema.pt`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/model/pytorch_model_ema.pt)
+# `models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin`: [link](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/resolve/main/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin)
+download_models(["HunyuanDiT"])
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models([
+ "models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
+ "models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
+ "models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
+ "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
+])
+pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)
+
+prompt = "一幅充满诗意美感的全身肖像画,画中一位银发、蓝色眼睛、身穿蓝色连衣裙的少女漂浮在水下,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
+negative_prompt = "错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,"
+
+# Enjoy!
+torch.manual_seed(0)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_inference_steps=50, height=1024, width=1024,
+)
+image.save("image_1024.png")
+
+# Highres fix
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ input_image=image.resize((2048, 2048)),
+ num_inference_steps=50, height=2048, width=2048,
+ denoising_strength=0.4, tiled=True,
+)
+image.save("image_2048.png")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8fefdb7e5ace6408bdc9f086dd779e9e0a133e8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_text_to_image.py
@@ -0,0 +1,38 @@
+from diffsynth import ModelManager, SDXLImagePipeline, download_models
+import torch
+
+# Download models
+# https://huggingface.co/Kwai-Kolors/Kolors
+download_models(["Kolors"])
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/kolors/Kolors/text_encoder",
+ "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
+ "models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors"
+ ])
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+# Optional (Int4 quantize) pip install cpm_kernels
+# pipe.text_encoder_kolors = pipe.text_encoder_kolors.quantize(4)
+# torch.cuda.empty_cache()
+
+prompt = "一幅充满诗意美感的全身画,泛红的肤色,画中一位银色长发、蓝色眼睛、肤色红润、身穿蓝色吊带连衣裙的少女漂浮在水下,面向镜头,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
+negative_prompt = "半身,苍白的肤色,蜡黄的肤色,尸体,错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,错误的手指,口红,腮红"
+
+torch.manual_seed(7)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ num_inference_steps=50,
+ cfg_scale=4,
+)
+image.save(f"image_1024.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ input_image=image.resize((2048, 2048)), denoising_strength=0.4, height=2048, width=2048,
+ num_inference_steps=50,
+ cfg_scale=4,
+)
+image.save("image_2048.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_with_sdxl_models.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_with_sdxl_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..946d65ec59a88dd61b360b9e92f278f8ca5621f7
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/kolors_with_sdxl_models.py
@@ -0,0 +1,68 @@
+from diffsynth import ModelManager, SDXLImagePipeline, download_models, ControlNetConfigUnit
+import torch
+
+
+
+def run_kolors_with_controlnet():
+ download_models(["Kolors", "ControlNet_union_sdxl_promax"])
+ model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/kolors/Kolors/text_encoder",
+ "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
+ "models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors",
+ "models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors",
+ ])
+ pipe = SDXLImagePipeline.from_model_manager(model_manager, controlnet_config_units=[
+ ControlNetConfigUnit("depth", "models/ControlNet/controlnet_union/diffusion_pytorch_model_promax.safetensors", 0.6)
+ ])
+ negative_prompt = "半身,苍白的肤色,蜡黄的肤色,尸体,错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,错误的手指,口红,腮红"
+
+ prompt = "一幅充满诗意美感的全身画,泛红的肤色,画中一位银色长发、蓝色眼睛、肤色红润、身穿蓝色吊带连衣裙的少女漂浮在水下,面向镜头,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
+ torch.manual_seed(7)
+ image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50, cfg_scale=4,
+ )
+ image.save("image.jpg")
+
+ prompt = "一幅充满诗意美感的全身画,泛红的肤色,画中一位银色长发、黑色眼睛、肤色红润、身穿蓝色吊带连衣裙的少女,面向镜头,周围是绚烂的火焰"
+ torch.manual_seed(0)
+ image_controlnet = pipe(
+ prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50, cfg_scale=4,
+ controlnet_image=image,
+ )
+ image_controlnet.save("image_depth_1.jpg")
+
+ prompt = "一幅充满诗意美感的全身画,画中一位皮肤白皙、黑色长发、黑色眼睛、身穿金色吊带连衣裙的少女,周围是闪电,画面明亮"
+ torch.manual_seed(1)
+ image_controlnet = pipe(
+ prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50, cfg_scale=4,
+ controlnet_image=image,
+ )
+ image_controlnet.save("image_depth_2.jpg")
+
+
+
+def run_kolors_with_lora():
+ download_models(["Kolors", "SDXL_lora_zyd232_ChineseInkStyle_SDXL_v1_0"])
+ model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/kolors/Kolors/text_encoder",
+ "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
+ "models/kolors/Kolors/vae/diffusion_pytorch_model.safetensors"
+ ])
+ model_manager.load_lora("models/lora/zyd232_ChineseInkStyle_SDXL_v1_0.safetensors", lora_alpha=1.5)
+ pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+ prompt = "一幅充满诗意美感的全身画,泛红的肤色,画中一位银色长发、蓝色眼睛、肤色红润、身穿蓝色吊带连衣裙的少女漂浮在水下,面向镜头,周围是光彩的气泡,和煦的阳光透过水面折射进水下"
+ negative_prompt = "半身,苍白的肤色,蜡黄的肤色,尸体,错误的眼睛,糟糕的人脸,毁容,糟糕的艺术,变形,多余的肢体,模糊的颜色,模糊,重复,病态,残缺,错误的手指,口红,腮红"
+
+ torch.manual_seed(7)
+ image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=50, cfg_scale=4,
+ )
+ image.save("image_lora.jpg")
+
+
+
+run_kolors_with_controlnet()
+run_kolors_with_lora()
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/omnigen_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/omnigen_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..42777531e4110fabff9380d8efc3b4b468e3e75c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/omnigen_text_to_image.py
@@ -0,0 +1,25 @@
+import torch
+from diffsynth import ModelManager, OmnigenImagePipeline
+
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16, model_id_list=["OmniGen-v1"])
+pipe = OmnigenImagePipeline.from_model_manager(model_manager)
+
+image_man = pipe(
+ prompt="A portrait of a man.",
+ cfg_scale=2.5, num_inference_steps=50, seed=0
+)
+image_man.save("image_man.jpg")
+
+image_woman = pipe(
+ prompt="A portrait of an Asian woman with a white t-shirt.",
+ cfg_scale=2.5, num_inference_steps=50, seed=1
+)
+image_woman.save("image_woman.jpg")
+
+image_merged = pipe(
+ prompt="a man and a woman. The man is the man in
<|image_1|>. The woman is the woman in
<|image_2|>.",
+ reference_images=[image_man, image_woman],
+ cfg_scale=2.5, image_cfg_scale=2.5, num_inference_steps=50, seed=2
+)
+image_merged.save("image_merged.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd35_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd35_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..94f59e0acbec5284d037bb0aae35aec4e726286e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd35_text_to_image.py
@@ -0,0 +1,28 @@
+from diffsynth import ModelManager, SD3ImagePipeline
+import torch
+
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda", model_id_list=["StableDiffusion3.5-large"])
+pipe = SD3ImagePipeline.from_model_manager(model_manager)
+
+prompt = "a full body photo of a beautiful Asian girl. CG, masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait. The girl's flowing silver hair shimmers with every color of the rainbow and cascades down, merging with the floating flora around her."
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+torch.manual_seed(1)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_1024.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=5,
+ input_image=image.resize((2048, 2048)), denoising_strength=0.5,
+ num_inference_steps=50, width=2048, height=2048,
+ tiled=True
+)
+image.save("image_2048.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd3_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd3_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..d00fcfaa65ce160cc14376db5355a0d01638e6e2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd3_text_to_image.py
@@ -0,0 +1,33 @@
+from diffsynth import ModelManager, SD3ImagePipeline, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_3/sd3_medium_incl_clips.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips.safetensors)
+download_models(["StableDiffusion3_without_T5"])
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
+pipe = SD3ImagePipeline.from_model_manager(model_manager)
+
+
+prompt = "masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait,"
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+torch.manual_seed(7)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_1024.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5,
+ input_image=image.resize((2048, 2048)), denoising_strength=0.5,
+ num_inference_steps=50, width=2048, height=2048,
+ tiled=True
+)
+image.save("image_2048.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d553b2e6a680f312c2a0345fa3eb55ff76d7291
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sd_text_to_image.py
@@ -0,0 +1,75 @@
+from diffsynth import ModelManager, SDImagePipeline, ControlNetConfigUnit, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion/aingdiffusion_v12.safetensors`: [link](https://civitai.com/api/download/models/229575?type=Model&format=SafeTensor&size=full&fp=fp16)
+# `models/ControlNet/control_v11p_sd15_lineart.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart.pth)
+# `models/ControlNet/control_v11f1e_sd15_tile.pth`: [link](https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth)
+# `models/Annotators/sk_model.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model.pth)
+# `models/Annotators/sk_model2.pth`: [link](https://huggingface.co/lllyasviel/Annotators/resolve/main/sk_model2.pth)
+download_models(["AingDiffusion_v12", "ControlNet_v11p_sd15_lineart", "ControlNet_v11f1e_sd15_tile"])
+
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/stable_diffusion/aingdiffusion_v12.safetensors",
+ "models/ControlNet/control_v11f1e_sd15_tile.pth",
+ "models/ControlNet/control_v11p_sd15_lineart.pth"
+ ])
+pipe = SDImagePipeline.from_model_manager(
+ model_manager,
+ [
+ ControlNetConfigUnit(
+ processor_id="tile",
+ model_path=rf"models/ControlNet/control_v11f1e_sd15_tile.pth",
+ scale=0.5
+ ),
+ ControlNetConfigUnit(
+ processor_id="lineart",
+ model_path=rf"models/ControlNet/control_v11p_sd15_lineart.pth",
+ scale=0.7
+ ),
+ ]
+)
+
+prompt = "masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait,"
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+torch.manual_seed(0)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5, clip_skip=1,
+ height=512, width=512, num_inference_steps=80,
+)
+image.save("512.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5, clip_skip=1,
+ input_image=image.resize((1024, 1024)), controlnet_image=image.resize((1024, 1024)),
+ height=1024, width=1024, num_inference_steps=40, denoising_strength=0.7,
+)
+image.save("1024.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5, clip_skip=1,
+ input_image=image.resize((2048, 2048)), controlnet_image=image.resize((2048, 2048)),
+ height=2048, width=2048, num_inference_steps=20, denoising_strength=0.7,
+)
+image.save("2048.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=7.5, clip_skip=1,
+ input_image=image.resize((4096, 4096)), controlnet_image=image.resize((4096, 4096)),
+ height=4096, width=4096, num_inference_steps=10, denoising_strength=0.5,
+ tiled=True, tile_size=128, tile_stride=64
+)
+image.save("4096.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcae3abd3ab7b9f7850eaef9fc04d5ca862664c6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_text_to_image.py
@@ -0,0 +1,34 @@
+from diffsynth import ModelManager, SDXLImagePipeline, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_xl/bluePencilXL_v200.safetensors`: [link](https://civitai.com/api/download/models/245614?type=Model&format=SafeTensor&size=pruned&fp=fp16)
+download_models(["BluePencilXL_v200"])
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models(["models/stable_diffusion_xl/bluePencilXL_v200.safetensors"])
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+prompt = "masterpiece, best quality, solo, long hair, wavy hair, silver hair, blue eyes, blue dress, medium breasts, dress, underwater, air bubble, floating hair, refraction, portrait,"
+negative_prompt = "worst quality, low quality, monochrome, zombie, interlocked fingers, Aissist, cleavage, nsfw,"
+
+torch.manual_seed(0)
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=6,
+ height=1024, width=1024, num_inference_steps=60,
+)
+image.save("1024.jpg")
+
+image = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=6,
+ input_image=image.resize((2048, 2048)),
+ height=2048, width=2048, num_inference_steps=60, denoising_strength=0.5
+)
+image.save("2048.jpg")
+
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_turbo.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_turbo.py
new file mode 100644
index 0000000000000000000000000000000000000000..c39fb088795c98860032bf7b6f5290e553809170
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/image_synthesis/sdxl_turbo.py
@@ -0,0 +1,31 @@
+from diffsynth import ModelManager, SDXLImagePipeline, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_xl_turbo/sd_xl_turbo_1.0_fp16.safetensors`: [link](https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0_fp16.safetensors)
+download_models(["StableDiffusionXL_Turbo"])
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models(["models/stable_diffusion_xl_turbo/sd_xl_turbo_1.0_fp16.safetensors"])
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+# Text to image
+torch.manual_seed(0)
+image = pipe(
+ prompt="black car",
+ # Do not modify the following parameters!
+ cfg_scale=1, height=512, width=512, num_inference_steps=1, progress_bar_cmd=lambda x:x
+)
+image.save(f"black_car.jpg")
+
+# Image to image
+torch.manual_seed(0)
+image = pipe(
+ prompt="red car",
+ input_image=image, denoising_strength=0.7,
+ # Do not modify the following parameters!
+ cfg_scale=1, height=512, width=512, num_inference_steps=1, progress_bar_cmd=lambda x:x
+)
+image.save(f"black_car_to_red_car.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cbb920eb875f8490ef300073f2b0dafdce6befab
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/README.md
@@ -0,0 +1,34 @@
+# Prompt Refining
+
+Prompt refining is supported in DiffSynth-Studio.
+
+### Example: Qwen
+
+If you are not a native English speaker, we provide LLM-based prompt translation and refinement. Qwen is a typical example; see [`qwen_prompt_refining.py`](./qwen_prompt_refining.py).
+
+Prompt: "鹰". This prompt will be refined:
+
+* A majestic eagle soaring high above a vast expanse of open sky, its wings spread wide and its eyes fixed on the horizon.
+* A majestic eagle soaring high above the horizon, its wingspan stretching out towards the endless sky. Its sharp beak and powerful talons stand out against the azure sky, highlighting its strength and freedom. The eagle's silhouette is silhouetted against the fading sun, casting long shadows behind it.
+* A majestic eagle soaring high above a vast, open landscape, its wings spread wide and its beak pointed towards the sky. The sun casts long shadows across the ground, adding depth and texture to the image. The bird's feathers shimmer in the light, creating a sense of movement and power.
+* A majestic eagle soaring high above a vast, open landscape, its sharp talons gripping a fish effortlessly in its beak. The sun casts a warm golden glow behind it, casting long shadows across the barren earth below. The eagle's wingspan stretches out towards infinity, its feathers glistening in the light. Its eyes fixate on the distant horizon, as if sensing something important about to unfold.
+
+|seed=0|seed=1|seed=2|seed=3|
+|-|-|-|-|
+|||||
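+
+Below is a minimal sketch of attaching the Qwen-based refiner, adapted from [`qwen_prompt_refining.py`](./qwen_prompt_refining.py):
+
+```python
+import torch
+from diffsynth import ModelManager, FluxImagePipeline, download_models, QwenPrompt
+
+download_models(["FLUX.1-dev", "QwenPrompt"])
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+    "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+    "models/FLUX/FLUX.1-dev/text_encoder_2",
+    "models/FLUX/FLUX.1-dev/ae.safetensors",
+    "models/FLUX/FLUX.1-dev/flux1-dev.safetensors",
+    "models/QwenPrompt/qwen2-1.5b-instruct",
+])
+# The refiner rewrites the short prompt before it reaches the text encoders.
+pipe = FluxImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[QwenPrompt])
+
+torch.manual_seed(0)
+image = pipe(prompt="鹰", height=1024, width=1024, num_inference_steps=30)
+image.save("eagle.jpg")
+```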
+
+### Example: OPUS + BeautifulPrompt
+
+Our prompter can translate prompts in other languages into English and refine them using the "BeautifulPrompt" models. Please see [`bf_prompt_refining.py`](./bf_prompt_refining.py) for more details.
+
+Prompt: "一个漂亮的女孩". The [translation model](https://huggingface.co/Helsinki-NLP/opus-mt-en-zh) will translate it to English.
+
+|seed=0|seed=1|seed=2|seed=3|
+|-|-|-|-|
+|||||
+
+Prompt: "一个漂亮的女孩". The [translation model](https://huggingface.co/Helsinki-NLP/opus-mt-en-zh) will translate it to English. Then the [refining model](https://huggingface.co/alibaba-pai/pai-bloom-1b1-text2prompt-sd) will refine the translated prompt for better visual quality.
+
+|seed=0|seed=1|seed=2|seed=3|
+|-|-|-|-|
+|||||
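+
+Below is a minimal sketch of chaining translation and BeautifulPrompt refinement, adapted from [`bf_prompt_refining.py`](./bf_prompt_refining.py):
+
+```python
+import torch
+from diffsynth import ModelManager, SDXLImagePipeline, download_models, Translator, BeautifulPrompt
+
+download_models(["StableDiffusionXL_v1", "BeautifulPrompt", "opus-mt-zh-en"])
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models([
+    "models/stable_diffusion_xl/sd_xl_base_1.0.safetensors",
+    "models/BeautifulPrompt/pai-bloom-1b1-text2prompt-sd",
+    "models/translator/opus-mt-zh-en",
+])
+# The prompt is translated first, then expanded by BeautifulPrompt.
+pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator, BeautifulPrompt])
+
+torch.manual_seed(0)
+image = pipe(prompt="一个漂亮的女孩", height=1024, width=1024, num_inference_steps=30)
+image.save("image.jpg")
+```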
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/bf_prompt_refining.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/bf_prompt_refining.py
new file mode 100644
index 0000000000000000000000000000000000000000..911ed6832fba6a3e1bef6be3c74fe074dcdd0577
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/bf_prompt_refining.py
@@ -0,0 +1,30 @@
+from diffsynth import ModelManager, SDXLImagePipeline, download_models, Translator, BeautifulPrompt
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors)
+# `models/BeautifulPrompt/pai-bloom-1b1-text2prompt-sd/`: [link](https://huggingface.co/alibaba-pai/pai-bloom-1b1-text2prompt-sd)
+# `models/translator/opus-mt-zh-en/`: [link](https://huggingface.co/Helsinki-NLP/opus-mt-zh-en)
+download_models(["StableDiffusionXL_v1", "BeautifulPrompt", "opus-mt-zh-en"])
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models([
+ "models/stable_diffusion_xl/sd_xl_base_1.0.safetensors",
+ "models/BeautifulPrompt/pai-bloom-1b1-text2prompt-sd",
+ "models/translator/opus-mt-zh-en"
+])
+pipe = SDXLImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[Translator, BeautifulPrompt])
+
+prompt = "一个漂亮的女孩"
+negative_prompt = ""
+
+for seed in range(4):
+ torch.manual_seed(seed)
+ image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt,
+ height=1024, width=1024,
+ num_inference_steps=30
+ )
+ image.save(f"{seed}.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/omost_flux_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/omost_flux_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6e5d1df41aebf8d9a46af077d458ccd741259ab
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/omost_flux_text_to_image.py
@@ -0,0 +1,35 @@
+import torch
+from diffsynth import download_models, ModelManager, OmostPromter, FluxImagePipeline
+
+
+download_models(["OmostPrompt"])
+download_models(["FLUX.1-dev"])
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16)
+model_manager.load_models([
+ "models/OmostPrompt/omost-llama-3-8b-4bits",
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ "models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
+])
+
+pipe_omost = FluxImagePipeline.from_model_manager(model_manager, prompt_extender_classes=[OmostPromter])
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+prompt = "A witch uses ice magic to fight against wild beasts"
+seed = 7
+
+torch.manual_seed(seed)
+image = pipe_omost(
+ prompt=prompt,
+ num_inference_steps=30, embedded_guidance=3.5
+)
+image.save(f"image_omost.jpg")
+
+torch.manual_seed(seed)
+image2 = pipe(
+ prompt=prompt,
+ num_inference_steps=30, embedded_guidance=3.5
+)
+image2.save(f"image.jpg")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/qwen_prompt_refining.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/qwen_prompt_refining.py
new file mode 100644
index 0000000000000000000000000000000000000000..511d506995d256dd9ca52ac2e248efa8f96ba3a4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/prompt_magic/qwen_prompt_refining.py
@@ -0,0 +1,27 @@
+from diffsynth import ModelManager, FluxImagePipeline, download_models, QwenPrompt
+import torch
+
+
+download_models(["FLUX.1-dev", "QwenPrompt"])
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ "models/FLUX/FLUX.1-dev/flux1-dev.safetensors",
+ "models/QwenPrompt/qwen2-1.5b-instruct",
+])
+pipe = FluxImagePipeline.from_model_manager(model_manager, prompt_refiner_classes=[QwenPrompt])
+
+prompt = "鹰"
+negative_prompt = ""
+
+for seed in range(4):
+ torch.manual_seed(seed)
+ image = pipe(
+ prompt=prompt, negative_prompt=negative_prompt,
+ height=1024, width=1024,
+ num_inference_steps=30
+ )
+ image.save(f"{seed}.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..db1cb83854b082167d49660740d16f4c4e4a614c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README.md
@@ -0,0 +1,399 @@
+# Qwen-Image
+
+[Switch to Chinese](./README_zh.md)
+
+Qwen-Image is an open-source image generation model developed by Tongyi Lab, Alibaba.
+
+## Installation
+
+Before using this model series, install DiffSynth-Studio from source code.
+
+```shell
+git clone https://github.com/modelscope/DiffSynth-Studio.git
+cd DiffSynth-Studio
+pip install -e .
+```
+
+## Quick Start
+
+Run the following code to quickly load the [Qwen/Qwen-Image](https://www.modelscope.cn/models/Qwen/Qwen-Image) model and perform inference.
+
+```python
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+prompt = "A detailed portrait of a girl underwater, wearing a blue flowing dress, hair gently floating, clear light and shadow, surrounded by bubbles, calm expression, fine details, dreamy and beautiful."
+image = pipe(prompt, seed=0, num_inference_steps=40)
+image.save("image.jpg")
+```
+
+## Model Overview
+
+|Model ID|Inference|Low VRAM Inference|Full Training|Validation after Full Training|LoRA Training|Validation after LoRA Training|
+|-|-|-|-|-|-|-|
+|[Qwen/Qwen-Image](https://www.modelscope.cn/models/Qwen/Qwen-Image)|[code](./model_inference/Qwen-Image.py)|[code](./model_inference_low_vram/Qwen-Image.py)|[code](./model_training/full/Qwen-Image.sh)|[code](./model_training/validate_full/Qwen-Image.py)|[code](./model_training/lora/Qwen-Image.sh)|[code](./model_training/validate_lora/Qwen-Image.py)|
+|[Qwen/Qwen-Image-Edit](https://www.modelscope.cn/models/Qwen/Qwen-Image-Edit)|[code](./model_inference/Qwen-Image-Edit.py)|[code](./model_inference_low_vram/Qwen-Image-Edit.py)|[code](./model_training/full/Qwen-Image-Edit.sh)|[code](./model_training/validate_full/Qwen-Image-Edit.py)|[code](./model_training/lora/Qwen-Image-Edit.sh)|[code](./model_training/validate_lora/Qwen-Image-Edit.py)|
+|[DiffSynth-Studio/Qwen-Image-Distill-Full](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-Full)|[code](./model_inference/Qwen-Image-Distill-Full.py)|[code](./model_inference_low_vram/Qwen-Image-Distill-Full.py)|[code](./model_training/full/Qwen-Image-Distill-Full.sh)|[code](./model_training/validate_full/Qwen-Image-Distill-Full.py)|[code](./model_training/lora/Qwen-Image-Distill-Full.sh)|[code](./model_training/validate_lora/Qwen-Image-Distill-Full.py)|
+|[DiffSynth-Studio/Qwen-Image-Distill-LoRA](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-LoRA)|[code](./model_inference/Qwen-Image-Distill-LoRA.py)|[code](./model_inference_low_vram/Qwen-Image-Distill-LoRA.py)|-|-|-|-|
+|[DiffSynth-Studio/Qwen-Image-EliGen](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-EliGen)|[code](./model_inference/Qwen-Image-EliGen.py)|[code](./model_inference_low_vram/Qwen-Image-EliGen.py)|-|-|[code](./model_training/lora/Qwen-Image-EliGen.sh)|[code](./model_training/validate_lora/Qwen-Image-EliGen.py)|
+|[DiffSynth-Studio/Qwen-Image-EliGen-V2](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-EliGen-V2)|[code](./model_inference/Qwen-Image-EliGen-V2.py)|[code](./model_inference_low_vram/Qwen-Image-EliGen-V2.py)|-|-|[code](./model_training/lora/Qwen-Image-EliGen.sh)|[code](./model_training/validate_lora/Qwen-Image-EliGen.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|
+|[DiffSynth-Studio/Qwen-Image-In-Context-Control-Union](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union)|[code](./model_inference/Qwen-Image-In-Context-Control-Union.py)|[code](./model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py)|-|-|[code](./model_training/lora/Qwen-Image-In-Context-Control-Union.sh)|[code](./model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py)|
+|[DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix)|[code](./model_inference/Qwen-Image-Edit-Lowres-Fix.py)|[code](./model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py)|-|-|-|-|
+
+## Model Inference
+
+The following section helps you understand our features and write inference code.
+
+
+
+### Load Model
+
+Use `from_pretrained` to load the model:
+
+```python
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+```
+
+Here, `torch_dtype` and `device` set the computation precision and device. `model_configs` can be used in different ways to specify model paths:
+
+* Download the model from [ModelScope](https://modelscope.cn/) and load it. In this case, fill in `model_id` and `origin_file_pattern`, for example:
+
+```python
+ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+```
+
+* Load the model from a local file path. In this case, fill in `path`, for example:
+
+```python
+ModelConfig(path="models/xxx.safetensors")
+```
+
+For a single model loaded from multiple files, use a list, for example:
+
+```python
+ModelConfig(path=[
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors",
+])
+```
+
+`ModelConfig` provides extra options to control model loading behavior:
+
+* `local_model_path`: Path to save downloaded models. Default is `"./models"`.
+* `skip_download`: Whether to skip downloading. Default is `False`. If your network cannot access [ModelScope](https://modelscope.cn/), download the required files manually and set this to `True`.
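+
+For example, a minimal sketch for a machine without network access (the directory layout here is illustrative and assumes the files were downloaded beforehand):
+
+```python
+ModelConfig(
+    model_id="Qwen/Qwen-Image",
+    origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors",
+    local_model_path="./models",  # the files must already be present under this directory
+    skip_download=True,           # do not try to reach ModelScope
+)
+```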
+
+
+
+
+
+
+### VRAM Management
+
+DiffSynth-Studio provides fine-grained VRAM management for the Qwen-Image model. This allows the model to run on devices with low VRAM. You can enable the offload feature using the code below. It moves some model parts to CPU memory when GPU memory is limited.
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+FP8 quantization is also supported:
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+You can use FP8 quantization and offload at the same time:
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+FP8 quantization can greatly reduce VRAM use, but it does not speed up inference. Some models may have quality issues like blur, tearing, or distortion when using FP8. Use FP8 with care.
+
+After enabling VRAM management, the framework will automatically choose a memory strategy based on free VRAM. The `enable_vram_management` function has the following options to control this strategy:
+
+* `vram_limit`: VRAM usage limit in GB. By default, it uses all free VRAM on the device. Note that this is not a strict limit. If the set limit is too low but actual free VRAM is enough, the model will run with minimal VRAM use. Set it to 0 for the smallest possible VRAM use.
+* `vram_buffer`: VRAM buffer size in GB. Default is 0.5GB. A buffer is needed because large network layers may use more VRAM than expected during loading. The best value is the VRAM size of the largest model layer.
+* `num_persistent_param_in_dit`: Number of parameters to keep in VRAM in the DiT model. Default is no limit. This option will be removed in the future. Do not rely on it.
+* `enable_dit_fp8_computation`: Whether to enable FP8 computation in the DiT model. This is only applicable to GPUs that support FP8 operations (e.g., H200, etc.). Disabled by default.
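+
+For example, a sketch that manually caps VRAM usage (the numbers are illustrative and should be tuned to your device):
+
+```python
+pipe.enable_vram_management(
+    vram_limit=8,     # aim for at most ~8 GB of VRAM (not a hard guarantee)
+    vram_buffer=0.5,  # headroom for the largest layer while it is loaded onto the GPU
+)
+```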
+
+
+
+
+
+
+### Inference Acceleration
+
+* FP8 Quantization: Choose the appropriate quantization method based on your hardware and requirements.
+ * GPUs that do not support FP8 computation (e.g., A100, 4090, etc.): FP8 quantization will only reduce VRAM usage without speeding up inference. Code: [./model_inference_low_vram/Qwen-Image.py](./model_inference_low_vram/Qwen-Image.py)
+ * GPUs that support FP8 operations (e.g., H200, etc.): Please install [Flash Attention 3](https://github.com/Dao-AILab/flash-attention). Otherwise, FP8 acceleration will only apply to Linear layers.
+ * Faster inference but higher VRAM usage: Use [./accelerate/Qwen-Image-FP8.py](./accelerate/Qwen-Image-FP8.py)
+ * Slightly slower inference but lower VRAM usage: Use [./accelerate/Qwen-Image-FP8-offload.py](./accelerate/Qwen-Image-FP8-offload.py)
+* Distillation acceleration: We trained two distillation models for fast inference at `cfg_scale=1` and `num_inference_steps=15`.
+ * [DiffSynth-Studio/Qwen-Image-Distill-Full](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-Full): Full distillation version. Better image quality but lower LoRA compatibility. Use [./model_inference/Qwen-Image-Distill-Full.py](./model_inference/Qwen-Image-Distill-Full.py).
+ * [DiffSynth-Studio/Qwen-Image-Distill-LoRA](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-LoRA): LoRA distillation version. Slightly lower image quality but better LoRA compatibility. Use [./model_inference/Qwen-Image-Distill-LoRA.py](./model_inference/Qwen-Image-Distill-LoRA.py).
+
+
+
+
+
+
+### Input Parameters
+
+The pipeline supports the following input parameters during inference:
+
+* `prompt`: Text prompt that describes what should appear in the image.
+* `negative_prompt`: Negative prompt that describes what should not appear in the image. Default is `""`.
+* `cfg_scale`: Parameter for classifier-free guidance. Default is 1. It takes effect when set to a value greater than 1.
+* `input_image`: Input image for image-to-image generation. Used with `denoising_strength`.
+* `denoising_strength`: Denoising strength, in the range 0 to 1. Default is 1. Values close to 0 keep the output similar to the input image; values close to 1 let it depart further. Leave this at 1 when `input_image` is not provided.
+* `height`: Image height. Must be a multiple of 16.
+* `width`: Image width. Must be a multiple of 16.
+* `seed`: Random seed. Default is `None`, meaning fully random.
+* `rand_device`: Device for generating random noise. Default is `"cpu"`. Setting it to `"cuda"` may lead to different results on different GPUs.
+* `num_inference_steps`: Number of inference steps. Default is 30.
+* `tiled`: Whether to enable tiled VAE inference. Default is `False`. Set to `True` to reduce VRAM use in VAE encoding/decoding. This causes small errors and slightly longer inference time.
+* `tile_size`: Tile size for VAE encoding/decoding. Default is 128. Only works when `tiled=True`.
+* `tile_stride`: Tile stride for VAE encoding/decoding. Default is 64. Only works when `tiled=True`. Must be less than or equal to `tile_size`.
+* `progress_bar_cmd`: Progress bar display. Default is `tqdm.tqdm`. Set to `lambda x: x` to hide the progress bar.
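+
+A short sketch combining several of these parameters (the values are illustrative):
+
+```python
+image = pipe(
+    prompt="a cat sitting on a windowsill, soft morning light",
+    negative_prompt="blurry, low quality",
+    cfg_scale=4,                                # values greater than 1 enable classifier-free guidance
+    height=1024, width=1024,                    # must be multiples of 16
+    seed=42,
+    num_inference_steps=30,
+    tiled=True, tile_size=128, tile_stride=64,  # reduce VRAM use in VAE encoding/decoding
+)
+image.save("image.jpg")
+```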
+
+
+
+
+## Model Training
+
+The Qwen-Image series models are trained using a unified script [`./model_training/train.py`](./model_training/train.py).
+
+
+
+### Script Parameters
+
+The script includes the following parameters:
+
+* Dataset
+ * `--dataset_base_path`: Root path of the dataset.
+ * `--dataset_metadata_path`: Path to the dataset metadata file.
+ * `--max_pixels`: Maximum pixel area. Default is 1024*1024. When dynamic resolution is enabled, any image with resolution higher than this will be resized down.
+ * `--height`: Height of image or video. Leave `height` and `width` empty to enable dynamic resolution.
+ * `--width`: Width of image or video. Leave `height` and `width` empty to enable dynamic resolution.
+ * `--data_file_keys`: Data file keys in metadata. Separate with commas.
+ * `--dataset_repeat`: Number of times the dataset repeats per epoch.
+ * `--dataset_num_workers`: Number of workers for data loading.
+* Model
+ * `--model_paths`: Model paths to load. In JSON format.
+ * `--model_id_with_origin_paths`: Model ID with original paths, e.g., Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors. Separate with commas.
+ * `--tokenizer_path`: Tokenizer path. Leave empty to auto-download.
+ * `--processor_path`: Path to the processor of Qwen-Image-Edit. Leave empty to auto-download.
+* Training
+ * `--learning_rate`: Learning rate.
+ * `--weight_decay`: Weight decay.
+ * `--num_epochs`: Number of epochs.
+ * `--output_path`: Save path.
+ * `--remove_prefix_in_ckpt`: Remove prefix in checkpoint.
+ * `--save_steps`: Interval, in training steps, between checkpoint saves. If None, a checkpoint is saved at the end of every epoch.
+ * `--find_unused_parameters`: Whether to find unused parameters in DDP.
+* Trainable Modules
+ * `--trainable_models`: Models to train, e.g., dit, vae, text_encoder.
+ * `--lora_base_model`: Which model to add LoRA to.
+ * `--lora_target_modules`: Which layers to add LoRA to.
+ * `--lora_rank`: Rank of LoRA.
+ * `--lora_checkpoint`: Path to the LoRA checkpoint. If provided, LoRA will be loaded from this checkpoint.
+* Extra Model Inputs
+ * `--extra_inputs`: Extra model inputs, separated by commas.
+* VRAM Management
+ * `--use_gradient_checkpointing`: Whether to enable gradient checkpointing.
+ * `--use_gradient_checkpointing_offload`: Whether to offload gradient checkpointing to CPU memory.
+ * `--gradient_accumulation_steps`: Number of gradient accumulation steps.
+
+In addition, the training framework is built on [`accelerate`](https://huggingface.co/docs/accelerate/index). Run `accelerate config` before training to configure GPU-related settings. For some training tasks (e.g., full training of the 20B model), we provide suggested `accelerate` config files; see the corresponding training script for details.
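+
+As an illustration, a LoRA training command could assemble the parameters above as follows (paths and hyperparameter values are placeholders; refer to the per-model scripts in the table at the top of this document for tested commands):
+
+```shell
+accelerate launch examples/qwen_image/model_training/train.py \
+  --dataset_base_path data/example_image_dataset \
+  --dataset_metadata_path data/example_image_dataset/metadata.csv \
+  --max_pixels 1048576 \
+  --dataset_repeat 50 \
+  --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+  --learning_rate 1e-4 \
+  --num_epochs 1 \
+  --remove_prefix_in_ckpt "pipe.dit." \
+  --output_path "./models/train/Qwen-Image_lora" \
+  --lora_base_model "dit" \
+  --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+  --lora_rank 32 \
+  --use_gradient_checkpointing
+```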
+
+
+
+
+
+
+### Step 1: Prepare Dataset
+
+The dataset contains a set of files. We suggest organizing your dataset like this:
+
+```
+data/example_image_dataset/
+├── metadata.csv
+├── image1.jpg
+└── image2.jpg
+```
+
+Here, `image1.jpg` and `image2.jpg` are image files for training, and `metadata.csv` is a metadata list, for example:
+
+```
+image,prompt
+image1.jpg,"a cat is sleeping"
+image2.jpg,"a dog is running"
+```
+
+We have built a sample image dataset for your testing. Use the following command to download it:
+
+```shell
+modelscope download --dataset DiffSynth-Studio/example_image_dataset --local_dir ./data/example_image_dataset
+```
+
+The dataset supports multiple image formats: `"jpg", "jpeg", "png", "webp"`.
+
+Image size can be controlled by script parameters `--height` and `--width`. When `--height` and `--width` are empty, dynamic resolution is enabled. Images will be trained using their original sizes.
+
+**We strongly recommend using fixed resolution for training, as multi-GPU training may have load balancing issues with dynamic resolution.**
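+
+For example, to train at a fixed 1024×1024 resolution, add the following to the training command:
+
+```shell
+--height 1024 --width 1024
+```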
+
+
+
+
+
+
+### Step 2: Load Model
+
+Similar to model loading during inference, you can set the model to load directly by model ID. For example, during inference we load the model like this:
+
+```python
+model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+]
+```
+
+Then during training, use the following parameter to load the same models:
+
+```shell
+--model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors"
+```
+
+If you want to load the model from local files, for example, during inference:
+
+```python
+model_configs=[
+ ModelConfig([
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors"
+ ]),
+ ModelConfig([
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors"
+ ]),
+ ModelConfig("models/Qwen/Qwen-Image/vae/diffusion_pytorch_model.safetensors")
+]
+```
+
+Then during training, set it as:
+
+```shell
+--model_paths '[
+ [
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors"
+ ],
+ [
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors"
+ ],
+ "models/Qwen/Qwen-Image/vae/diffusion_pytorch_model.safetensors"
+]' \
+```
+
+
+
+
+
+
+### Step 3: Set Trainable Modules
+
+The training framework supports training base models or LoRA models. Here are some examples:
+
+* Full training of DiT part: `--trainable_models dit`
+* Train LoRA on DiT part: `--lora_base_model dit --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" --lora_rank 32`
+
+Also, since the training script loads multiple modules (text encoder, dit, vae), you need to remove prefixes when saving model files. For example, when fully training the DiT part or training LoRA on DiT, set `--remove_prefix_in_ckpt pipe.dit.`
+
+
+
+
+
+
+### Step 4: Start Training
+
+We have written training commands for each model. Please refer to the table at the start of this document.
+
+
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README_zh.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README_zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..db958a7e81bc18a386fcfbd2b58d8998deb1253d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/README_zh.md
@@ -0,0 +1,399 @@
+# Qwen-Image
+
+[Switch to English](./README.md)
+
+Qwen-Image 是由阿里巴巴通义实验室开源的图像生成模型。
+
+## 安装
+
+在使用本系列模型之前,请通过源码安装 DiffSynth-Studio。
+
+```shell
+git clone https://github.com/modelscope/DiffSynth-Studio.git
+cd DiffSynth-Studio
+pip install -e .
+```
+
+## 快速开始
+
+通过运行以下代码可以快速加载 [Qwen/Qwen-Image](https://www.modelscope.cn/models/Qwen/Qwen-Image) 模型并进行推理
+
+```python
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=40)
+image.save("image.jpg")
+```
+
+## 模型总览
+
+|模型 ID|推理|低显存推理|全量训练|全量训练后验证|LoRA 训练|LoRA 训练后验证|
+|-|-|-|-|-|-|-|
+|[Qwen/Qwen-Image](https://www.modelscope.cn/models/Qwen/Qwen-Image)|[code](./model_inference/Qwen-Image.py)|[code](./model_inference_low_vram/Qwen-Image.py)|[code](./model_training/full/Qwen-Image.sh)|[code](./model_training/validate_full/Qwen-Image.py)|[code](./model_training/lora/Qwen-Image.sh)|[code](./model_training/validate_lora/Qwen-Image.py)|
+|[Qwen/Qwen-Image-Edit](https://www.modelscope.cn/models/Qwen/Qwen-Image-Edit)|[code](./model_inference/Qwen-Image-Edit.py)|[code](./model_inference_low_vram/Qwen-Image-Edit.py)|[code](./model_training/full/Qwen-Image-Edit.sh)|[code](./model_training/validate_full/Qwen-Image-Edit.py)|[code](./model_training/lora/Qwen-Image-Edit.sh)|[code](./model_training/validate_lora/Qwen-Image-Edit.py)|
+|[DiffSynth-Studio/Qwen-Image-Distill-Full](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-Full)|[code](./model_inference/Qwen-Image-Distill-Full.py)|[code](./model_inference_low_vram/Qwen-Image-Distill-Full.py)|[code](./model_training/full/Qwen-Image-Distill-Full.sh)|[code](./model_training/validate_full/Qwen-Image-Distill-Full.py)|[code](./model_training/lora/Qwen-Image-Distill-Full.sh)|[code](./model_training/validate_lora/Qwen-Image-Distill-Full.py)|
+|[DiffSynth-Studio/Qwen-Image-Distill-LoRA](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-LoRA)|[code](./model_inference/Qwen-Image-Distill-LoRA.py)|[code](./model_inference_low_vram/Qwen-Image-Distill-LoRA.py)|-|-|-|-|
+|[DiffSynth-Studio/Qwen-Image-EliGen](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-EliGen)|[code](./model_inference/Qwen-Image-EliGen.py)|[code](./model_inference_low_vram/Qwen-Image-EliGen.py)|-|-|[code](./model_training/lora/Qwen-Image-EliGen.sh)|[code](./model_training/validate_lora/Qwen-Image-EliGen.py)|
+|[DiffSynth-Studio/Qwen-Image-EliGen-V2](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-EliGen-V2)|[code](./model_inference/Qwen-Image-EliGen-V2.py)|[code](./model_inference_low_vram/Qwen-Image-EliGen-V2.py)|-|-|[code](./model_training/lora/Qwen-Image-EliGen.sh)|[code](./model_training/validate_lora/Qwen-Image-EliGen.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py)|
+|[DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint](https://modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint)|[code](./model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh)|[code](./model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|[code](./model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh)|[code](./model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py)|
+|[DiffSynth-Studio/Qwen-Image-In-Context-Control-Union](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union)|[code](./model_inference/Qwen-Image-In-Context-Control-Union.py)|[code](./model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py)|-|-|[code](./model_training/lora/Qwen-Image-In-Context-Control-Union.sh)|[code](./model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py)|
+|[DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix)|[code](./model_inference/Qwen-Image-Edit-Lowres-Fix.py)|[code](./model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py)|-|-|-|-|
+
+## 模型推理
+
+以下部分将会帮助您理解我们的功能并编写推理代码。
+
+
+
+### 加载模型
+
+模型通过 `from_pretrained` 加载:
+
+```python
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+```
+
+其中 `torch_dtype` 和 `device` 是计算精度和计算设备。`model_configs` 可通过多种方式配置模型路径:
+
+* 从[魔搭社区](https://modelscope.cn/)下载模型并加载。此时需要填写 `model_id` 和 `origin_file_pattern`,例如
+
+```python
+ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+```
+
+* 从本地文件路径加载模型。此时需要填写 `path`,例如
+
+```python
+ModelConfig(path="models/xxx.safetensors")
+```
+
+对于从多个文件加载的单一模型,使用列表即可,例如
+
+```python
+ModelConfig(path=[
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors",
+])
+```
+
+`ModelConfig` 提供了额外的参数用于控制模型加载时的行为:
+
+* `local_model_path`: 用于保存下载模型的路径,默认值为 `"./models"`。
+* `skip_download`: 是否跳过下载,默认值为 `False`。当您的网络无法访问[魔搭社区](https://modelscope.cn/)时,请手动下载必要的文件,并将其设置为 `True`。
+
+
+
+
+
+
+### 显存管理
+
+DiffSynth-Studio 为 Qwen-Image 模型提供了细粒度的显存管理,让模型能够在低显存设备上进行推理,可通过以下代码开启 offload 功能,在显存有限的设备上将部分模块 offload 到内存中。
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化功能也是支持的:
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化和 offload 可同时开启:
+
+```python
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化能够大幅度减少显存占用,但不会加速,部分模型在 FP8 量化下会出现精度不足导致的画面模糊、撕裂、失真问题,请谨慎使用 FP8 量化。
+
+开启显存管理后,框架会自动根据设备上的剩余显存确定显存管理策略。`enable_vram_management` 函数提供了以下参数,用于手动控制显存管理策略:
+
+* `vram_limit`: 显存占用量限制(GB),默认占用设备上的剩余显存。注意这不是一个绝对限制,当设置的显存不足以支持模型进行推理,但实际可用显存足够时,将会以最小化显存占用的形式进行推理。将其设置为0时,将会实现理论最小显存占用。
+* `vram_buffer`: 显存缓冲区大小(GB),默认为 0.5GB。由于部分较大的神经网络层在 onload 阶段会不可控地占用更多显存,因此一个显存缓冲区是必要的,理论上的最优值为模型中最大的层所占的显存。
+* `num_persistent_param_in_dit`: DiT 模型中常驻显存的参数数量(个),默认为无限制。我们将会在未来删除这个参数,请不要依赖这个参数。
+* `enable_dit_fp8_computation`: 是否启用 DiT 模型中的 FP8 计算,仅适用于支持 FP8 运算的 GPU(例如 H200 等),默认不启用。
+
+
+
+
+
+
+### 推理加速
+
+* FP8 量化:根据您的硬件与需求,请选择合适的量化方式
+ * GPU 不支持 FP8 计算(例如 A100、4090 等):FP8 量化仅能降低显存占用,无法加速,代码:[./model_inference_low_vram/Qwen-Image.py](./model_inference_low_vram/Qwen-Image.py)
+ * GPU 支持 FP8 运算(例如 H200 等):请安装 [Flash Attention 3](https://github.com/Dao-AILab/flash-attention),否则 FP8 加速仅对 Linear 层生效
+ * 更快的速度,但更大的显存:请使用 [./accelerate/Qwen-Image-FP8.py](./accelerate/Qwen-Image-FP8.py)
+ * 稍慢的速度,但更小的显存:请使用 [./accelerate/Qwen-Image-FP8-offload.py](./accelerate/Qwen-Image-FP8-offload.py)
+* 蒸馏加速:我们训练了两个蒸馏加速模型,可以在 `cfg_scale=1` 和 `num_inference_steps=15` 设置下进行快速推理
+ * [DiffSynth-Studio/Qwen-Image-Distill-Full](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-Full):全量蒸馏训练版本,更好的生成效果,稍差的 LoRA 兼容性,请使用 [./model_inference/Qwen-Image-Distill-Full.py](./model_inference/Qwen-Image-Distill-Full.py)
+ * [DiffSynth-Studio/Qwen-Image-Distill-LoRA](https://www.modelscope.cn/models/DiffSynth-Studio/Qwen-Image-Distill-LoRA):LoRA 蒸馏训练版本,稍差的生成效果,更好的 LoRA 兼容性,请使用 [./model_inference/Qwen-Image-Distill-LoRA.py](./model_inference/Qwen-Image-Distill-LoRA.py)
+
+
+
+
+
+
+### 输入参数
+
+Pipeline 在推理阶段能够接收以下输入参数:
+
+* `prompt`: 提示词,描述画面中出现的内容。
+* `negative_prompt`: 负向提示词,描述画面中不应该出现的内容,默认值为 `""`。
+* `cfg_scale`: Classifier-free guidance 的参数,默认值为 1,当设置为大于1的数值时生效。
+* `input_image`: 输入图像,用于图生图,该参数与 `denoising_strength` 配合使用。
+* `denoising_strength`: 去噪强度,范围是 0~1,默认值为 1,当数值接近 0 时,生成图像与输入图像相似;当数值接近 1 时,生成图像与输入图像相差更大。在不输入 `input_image` 参数时,请不要将其设置为非 1 的数值。
+* `height`: 图像高度,需保证高度为 16 的倍数。
+* `width`: 图像宽度,需保证宽度为 16 的倍数。
+* `seed`: 随机种子。默认为 `None`,即完全随机。
+* `rand_device`: 生成随机高斯噪声矩阵的计算设备,默认为 `"cpu"`。当设置为 `cuda` 时,在不同 GPU 上会导致不同的生成结果。
+* `num_inference_steps`: 推理次数,默认值为 30。
+* `tiled`: 是否启用 VAE 分块推理,默认为 `False`。设置为 `True` 时可显著减少 VAE 编解码阶段的显存占用,会产生少许误差,以及少量推理时间延长。
+* `tile_size`: VAE 编解码阶段的分块大小,默认为 128,仅在 `tiled=True` 时生效。
+* `tile_stride`: VAE 编解码阶段的分块步长,默认为 64,仅在 `tiled=True` 时生效,需保证其数值小于或等于 `tile_size`。
+* `progress_bar_cmd`: 进度条,默认为 `tqdm.tqdm`。可通过设置为 `lambda x:x` 来屏蔽进度条。
+
+
+
+
+## 模型训练
+
+Qwen-Image 系列模型训练通过统一的 [`./model_training/train.py`](./model_training/train.py) 脚本进行。
+
+
+
+### 脚本参数
+
+脚本包含以下参数:
+
+* 数据集
+ * `--dataset_base_path`: 数据集的根路径。
+ * `--dataset_metadata_path`: 数据集的元数据文件路径。
+ * `--max_pixels`: 最大像素面积,默认为 1024*1024,当启用动态分辨率时,任何分辨率大于这个数值的图片都会被缩小。
+ * `--height`: 图像或视频的高度。将 `height` 和 `width` 留空以启用动态分辨率。
+ * `--width`: 图像或视频的宽度。将 `height` 和 `width` 留空以启用动态分辨率。
+ * `--data_file_keys`: 元数据中的数据文件键。用逗号分隔。
+ * `--dataset_repeat`: 每个 epoch 中数据集重复的次数。
+ * `--dataset_num_workers`: 每个 Dataloader 的进程数量。
+* 模型
+ * `--model_paths`: 要加载的模型路径。JSON 格式。
+ * `--model_id_with_origin_paths`: 带原始路径的模型 ID,例如 Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors。用逗号分隔。
+ * `--tokenizer_path`: tokenizer 路径,留空将会自动下载。
+ * `--processor_path`:Qwen-Image-Edit 的 processor 路径。留空则自动下载。
+* 训练
+ * `--learning_rate`: 学习率。
+ * `--weight_decay`:权重衰减大小。
+ * `--num_epochs`: 轮数(Epoch)。
+ * `--output_path`: 保存路径。
+ * `--remove_prefix_in_ckpt`: 在 ckpt 中移除前缀。
+ * `--save_steps`: 保存模型的间隔 step 数量,如果设置为 None,则每个 epoch 保存一次。
+ * `--find_unused_parameters`: DDP 训练中是否存在未使用的参数。
+* 可训练模块
+ * `--trainable_models`: 可训练的模型,例如 dit、vae、text_encoder。
+ * `--lora_base_model`: LoRA 添加到哪个模型上。
+ * `--lora_target_modules`: LoRA 添加到哪一层上。
+ * `--lora_rank`: LoRA 的秩(Rank)。
+ * `--lora_checkpoint`: LoRA 检查点的路径。如果提供此路径,LoRA 将从此检查点加载。
+* 额外模型输入
+ * `--extra_inputs`: 额外的模型输入,以逗号分隔。
+* 显存管理
+ * `--use_gradient_checkpointing`: 是否启用 gradient checkpointing。
+ * `--use_gradient_checkpointing_offload`: 是否将 gradient checkpointing 卸载到内存中。
+ * `--gradient_accumulation_steps`: 梯度累积步数。
+
+此外,训练框架基于 [`accelerate`](https://huggingface.co/docs/accelerate/index) 构建,在开始训练前运行 `accelerate config` 可配置 GPU 的相关参数。对于部分模型训练(例如 20B 模型的全量训练)脚本,我们提供了建议的 `accelerate` 配置文件,可在对应的训练脚本中查看。
+
+
+
+
+
+
+### Step 1: 准备数据集
+
+数据集包含一系列文件,我们建议您这样组织数据集文件:
+
+```
+data/example_image_dataset/
+├── metadata.csv
+├── image1.jpg
+└── image2.jpg
+```
+
+其中 `image1.jpg`、`image2.jpg` 为训练用图像数据,`metadata.csv` 为元数据列表,例如
+
+```
+image,prompt
+image1.jpg,"a cat is sleeping"
+image2.jpg,"a dog is running"
+```
+
+我们构建了一个样例图像数据集,以方便您进行测试,通过以下命令可以下载这个数据集:
+
+```shell
+modelscope download --dataset DiffSynth-Studio/example_image_dataset --local_dir ./data/example_image_dataset
+```
+
+数据集支持多种图片格式,`"jpg", "jpeg", "png", "webp"`。
+
+图片的尺寸可通过脚本参数 `--height`、`--width` 控制。当 `--height` 和 `--width` 为空时将会开启动态分辨率,按照数据集中每个图像的实际宽高训练。
+
+**我们强烈建议使用固定分辨率训练,因为在多卡训练中存在负载均衡问题。**
+
+
+
+
+
+
+### Step 2: 加载模型
+
+类似于推理时的模型加载逻辑,可直接通过模型 ID 配置要加载的模型。例如,推理时我们通过以下设置加载模型
+
+```python
+model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+]
+```
+
+那么在训练时,填入以下参数即可加载对应的模型。
+
+```shell
+--model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors"
+```
+
+如果您希望从本地文件加载模型,例如推理时
+
+```python
+model_configs=[
+ ModelConfig([
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors"
+ ]),
+ ModelConfig([
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors"
+ ]),
+ ModelConfig("models/Qwen/Qwen-Image/vae/diffusion_pytorch_model.safetensors")
+]
+```
+
+那么训练时需设置为
+
+```shell
+--model_paths '[
+ [
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00001-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00002-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00003-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00004-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00005-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00006-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00007-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00008-of-00009.safetensors",
+ "models/Qwen/Qwen-Image/transformer/diffusion_pytorch_model-00009-of-00009.safetensors"
+ ],
+ [
+ "models/Qwen/Qwen-Image/text_encoder/model-00001-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00002-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00003-of-00004.safetensors",
+ "models/Qwen/Qwen-Image/text_encoder/model-00004-of-00004.safetensors"
+ ],
+ "models/Qwen/Qwen-Image/vae/diffusion_pytorch_model.safetensors"
+]' \
+```
+
+
+
+
+
+
+### Step 3: 设置可训练模块
+
+训练框架支持训练基础模型,或 LoRA 模型。以下是几个例子:
+
+* 全量训练 DiT 部分:`--trainable_models dit`
+* 训练 DiT 部分的 LoRA 模型:`--lora_base_model dit --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" --lora_rank 32`
+
+此外,由于训练脚本中加载了多个模块(text encoder、dit、vae),保存模型文件时需要移除前缀,例如在全量训练 DiT 部分或者训练 DiT 部分的 LoRA 模型时,请设置 `--remove_prefix_in_ckpt pipe.dit.`
+
+
+
+
+
+
+### Step 4: 启动训练程序
+
+我们为每一个模型编写了训练命令,请参考本文档开头的表格。
+
+
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8-offload.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8-offload.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d403162fcc2846daf03600ed0b86e57e667077e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8-offload.py
@@ -0,0 +1,18 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
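+# Note: FP8 computation in the DiT and FP8 attention only help on GPUs with native FP8 support (e.g., H200); on other GPUs FP8 mainly reduces VRAM usage.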
+pipe.enable_vram_management(enable_dit_fp8_computation=True)
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=40, enable_fp8_attention=True)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8.py
new file mode 100644
index 0000000000000000000000000000000000000000..944140712e8b17ad2519a6563db1cfac33936525
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/accelerate/Qwen-Image-FP8.py
@@ -0,0 +1,51 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth.models.qwen_image_dit import RMSNorm
+from diffsynth.vram_management.layers import enable_vram_management, AutoWrappedLinear, AutoWrappedModule
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
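+# Wrap the DiT's RMSNorm layers so they are kept in bfloat16 on the GPU.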
+enable_vram_management(
+ pipe.dit,
+ module_map = {
+ RMSNorm: AutoWrappedModule,
+ },
+ module_config = dict(
+ offload_dtype=torch.bfloat16,
+ offload_device="cuda",
+ onload_dtype=torch.bfloat16,
+ onload_device="cuda",
+ computation_dtype=torch.bfloat16,
+ computation_device="cuda",
+ ),
+ vram_limit=None,
+)
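+# Wrap the DiT's Linear layers so their weights are stored and computed in FP8 (float8_e4m3fn) on the GPU.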
+enable_vram_management(
+ pipe.dit,
+ module_map = {
+ torch.nn.Linear: AutoWrappedLinear,
+ },
+ module_config = dict(
+ offload_dtype=torch.float8_e4m3fn,
+ offload_device="cuda",
+ onload_dtype=torch.float8_e4m3fn,
+ onload_device="cuda",
+ computation_dtype=torch.float8_e4m3fn,
+ computation_device="cuda",
+ ),
+ vram_limit=None,
+)
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=40, enable_fp8_attention=True)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py
new file mode 100644
index 0000000000000000000000000000000000000000..85b9b96886a3a48bdf30319a99557a952721ab81
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Canny.py
@@ -0,0 +1,31 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="canny/image_1.jpg"
+)
+controlnet_image = Image.open("data/example_image_dataset/canny/image_1.jpg").resize((1328, 1328))
+
+prompt = "一只小狗,毛发光洁柔顺,眼神灵动,背景是樱花纷飞的春日庭院,唯美温馨。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..6676868ad0e24d09d962211dda758f29cf7180f2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Depth.py
@@ -0,0 +1,32 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="depth/image_1.jpg"
+)
+
+controlnet_image = Image.open("data/example_image_dataset/depth/image_1.jpg").resize((1328, 1328))
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb98e0a348ba5b9e9748496c05bc8fc8aef154e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Blockwise-ControlNet-Inpaint.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from modelscope import dataset_snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="inpaint/*.jpg"
+)
+prompt = "a cat with sunglasses"
+controlnet_image = Image.open("./data/example_image_dataset/inpaint/image_1.jpg").convert("RGB").resize((1328, 1328))
+inpaint_mask = Image.open("./data/example_image_dataset/inpaint/mask.jpg").convert("RGB").resize((1328, 1328))
+image = pipe(
+ prompt, seed=0,
+ input_image=controlnet_image, inpaint_mask=inpaint_mask,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image, inpaint_mask=inpaint_mask)],
+ num_inference_steps=40,
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-Full.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-Full.py
new file mode 100644
index 0000000000000000000000000000000000000000..c13a417f5aba55820b8111b872e8cf7be26e6ed1
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-Full.py
@@ -0,0 +1,17 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Distill-Full", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-LoRA.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-LoRA.py
new file mode 100644
index 0000000000000000000000000000000000000000..aad1fdd7eb222e2d87fb57180c2ffb5d1db4c3ec
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Distill-LoRA.py
@@ -0,0 +1,20 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from modelscope import snapshot_download
+import torch
+
+snapshot_download("DiffSynth-Studio/Qwen-Image-Distill-LoRA", local_dir="models/DiffSynth-Studio/Qwen-Image-Distill-LoRA")
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-Distill-LoRA/model.safetensors")
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit-Lowres-Fix.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit-Lowres-Fix.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbe3b7e36bafffa2958d254c2399a6bdf4de05cf
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit-Lowres-Fix.py
@@ -0,0 +1,26 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+from modelscope import snapshot_download
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
+snapshot_download("DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix", local_dir="models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix/model.safetensors")
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt=prompt, seed=0, num_inference_steps=40, height=1024, width=768)
+image.save("image.jpg")
+
+prompt = "将裙子变成粉色"
+image = image.resize((512, 384))
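+# The first result is downscaled to 512x384 to mimic a low-resolution edit input. edit_rope_interpolation=True
+# presumably interpolates the positional encoding so the small edit image maps onto the 1024x768 output
+# (inferred from the parameter name), while edit_image_auto_resize=False keeps the input at its low resolution.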
+image = pipe(prompt, edit_image=image, seed=1, num_inference_steps=40, height=1024, width=768, edit_rope_interpolation=True, edit_image_auto_resize=False)
+image.save(f"image2.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..39d39a616e53db6e11f946b823f33577f70c883c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-Edit.py
@@ -0,0 +1,26 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
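+# Qwen-Image-Edit loads a processor (processor_config) in place of the plain tokenizer, hence tokenizer_config=None.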
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+input_image = pipe(prompt=prompt, seed=0, num_inference_steps=40, height=1328, width=1024)
+input_image.save("image1.jpg")
+
+prompt = "将裙子改为粉色"
+# edit_image_auto_resize=True: auto resize input image to match the area of 1024*1024 with the original aspect ratio
+image = pipe(prompt, edit_image=input_image, seed=1, num_inference_steps=40, height=1328, width=1024, edit_image_auto_resize=True)
+image.save(f"image2.jpg")
+
+# edit_image_auto_resize=False: do not resize input image
+image = pipe(prompt, edit_image=input_image, seed=1, num_inference_steps=40, height=1328, width=1024, edit_image_auto_resize=False)
+image.save(f"image3.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen-V2.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen-V2.py
new file mode 100644
index 0000000000000000000000000000000000000000..82bab2d0236c058216f23406be1220db3096cdfc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen-V2.py
@@ -0,0 +1,106 @@
+import torch
+import random
+from PIL import Image, ImageDraw, ImageFont
+from modelscope import dataset_snapshot_download, snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+
+def visualize_masks(image, masks, mask_prompts, output_path, font_size=35, use_random_colors=False):
+ # Create a blank image for overlays
+ overlay = Image.new('RGBA', image.size, (0, 0, 0, 0))
+
+ colors = [
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ ]
+ # Generate random colors for each mask
+ if use_random_colors:
+ colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 80) for _ in range(len(masks))]
+
+ # Font settings
+ try:
+ font = ImageFont.truetype("wqy-zenhei.ttc", font_size) # Adjust as needed
+ except IOError:
+ font = ImageFont.load_default(font_size)
+
+ # Overlay each mask onto the overlay image
+ for mask, mask_prompt, color in zip(masks, mask_prompts, colors):
+ # Convert mask to RGBA mode
+ mask_rgba = mask.convert('RGBA')
+ mask_data = mask_rgba.getdata()
+ new_data = [(color if item[:3] == (255, 255, 255) else (0, 0, 0, 0)) for item in mask_data]
+ mask_rgba.putdata(new_data)
+
+ # Draw the mask prompt text on the mask
+ draw = ImageDraw.Draw(mask_rgba)
+ mask_bbox = mask.getbbox() # Get the bounding box of the mask
+ text_position = (mask_bbox[0] + 10, mask_bbox[1] + 10) # Adjust text position based on mask position
+ draw.text(text_position, mask_prompt, fill=(255, 255, 255, 255), font=font)
+
+ # Alpha composite the overlay with this mask
+ overlay = Image.alpha_composite(overlay, mask_rgba)
+
+ # Composite the overlay onto the original image
+ result = Image.alpha_composite(image.convert('RGBA'), overlay)
+
+ # Save or display the resulting image
+ result.save(output_path)
+
+ return result
+
+def example(pipe, seeds, example_id, global_prompt, entity_prompts):
+ dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern=f"data/examples/eligen/qwen-image/example_{example_id}/*.png")
+ masks = [Image.open(f"./data/examples/eligen/qwen-image/example_{example_id}/{i}.png").convert('RGB').resize((1024, 1024)) for i in range(len(entity_prompts))]
+ negative_prompt = "网格化,规则的网格,模糊, 低分辨率, 低质量, 变形, 畸形, 错误的解剖学, 变形的手, 变形的身体, 变形的脸, 变形的头发, 变形的眼睛, 变形的嘴巴"
+ for seed in seeds:
+ # generate image
+ image = pipe(
+ prompt=global_prompt,
+ cfg_scale=4.0,
+ negative_prompt=negative_prompt,
+ num_inference_steps=40,
+ seed=seed,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks,
+ )
+ image.save(f"eligen_example_{example_id}_{seed}.png")
+ visualize_masks(image, masks, entity_prompts, f"eligen_example_{example_id}_mask_{seed}.png")
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+snapshot_download("DiffSynth-Studio/Qwen-Image-EliGen-V2", local_dir="models/DiffSynth-Studio/Qwen-Image-EliGen-V2", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-EliGen-V2/model.safetensors")
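+# EliGen pairs each entity prompt with a mask image; judging from visualize_masks above, white pixels mark the
+# region the corresponding entity prompt should control. The masks for each example are downloaded in example().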
+
+seeds = [0]
+
+global_prompt = "写实摄影风格. A beautiful asia woman wearing white dress, she is holding a mirror with her right arm, with a beach background."
+entity_prompts = ["A beautiful woman", "mirror", "necklace", "glasses", "earring", "white dress", "jewelry headpiece"]
+example(pipe, seeds, 7, global_prompt, entity_prompts)
+
+global_prompt = "写实摄影风格, 细节丰富。街头一位漂亮的女孩,穿着衬衫和短裤,手持写有“实体控制”的标牌,背景是繁忙的城市街道,阳光明媚,行人匆匆。"
+entity_prompts = ["一个漂亮的女孩", "标牌 '实体控制'", "短裤", "衬衫"]
+example(pipe, seeds, 4, global_prompt, entity_prompts)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab0fd148d0d03d32ca6507c85e2d95e49a70c228
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-EliGen.py
@@ -0,0 +1,128 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+from PIL import Image, ImageDraw, ImageFont
+from modelscope import dataset_snapshot_download, snapshot_download
+import random
+
+
+def visualize_masks(image, masks, mask_prompts, output_path, font_size=35, use_random_colors=False):
+ # Create a blank image for overlays
+ overlay = Image.new('RGBA', image.size, (0, 0, 0, 0))
+
+ colors = [
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ ]
+ # Generate random colors for each mask
+ if use_random_colors:
+ colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 80) for _ in range(len(masks))]
+
+ # Font settings
+ try:
+ font = ImageFont.truetype("wqy-zenhei.ttc", font_size) # Adjust as needed
+ except IOError:
+ font = ImageFont.load_default(font_size)
+
+ # Overlay each mask onto the overlay image
+ for mask, mask_prompt, color in zip(masks, mask_prompts, colors):
+ # Convert mask to RGBA mode
+ mask_rgba = mask.convert('RGBA')
+ mask_data = mask_rgba.getdata()
+ new_data = [(color if item[:3] == (255, 255, 255) else (0, 0, 0, 0)) for item in mask_data]
+ mask_rgba.putdata(new_data)
+
+ # Draw the mask prompt text on the mask
+ draw = ImageDraw.Draw(mask_rgba)
+ mask_bbox = mask.getbbox() # Get the bounding box of the mask
+ text_position = (mask_bbox[0] + 10, mask_bbox[1] + 10) # Adjust text position based on mask position
+ draw.text(text_position, mask_prompt, fill=(255, 255, 255, 255), font=font)
+
+ # Alpha composite the overlay with this mask
+ overlay = Image.alpha_composite(overlay, mask_rgba)
+
+ # Composite the overlay onto the original image
+ result = Image.alpha_composite(image.convert('RGBA'), overlay)
+
+ # Save or display the resulting image
+ result.save(output_path)
+
+ return result
+
+def example(pipe, seeds, example_id, global_prompt, entity_prompts):
+ dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern=f"data/examples/eligen/qwen-image/example_{example_id}/*.png")
+ masks = [Image.open(f"./data/examples/eligen/qwen-image/example_{example_id}/{i}.png").convert('RGB') for i in range(len(entity_prompts))]
+ negative_prompt = ""
+ for seed in seeds:
+ # generate image
+ image = pipe(
+ prompt=global_prompt,
+ cfg_scale=4.0,
+ negative_prompt=negative_prompt,
+ num_inference_steps=30,
+ seed=seed,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks,
+ )
+ image.save(f"eligen_example_{example_id}_{seed}.png")
+ visualize_masks(image, masks, entity_prompts, f"eligen_example_{example_id}_mask_{seed}.png")
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+snapshot_download("DiffSynth-Studio/Qwen-Image-EliGen", local_dir="models/DiffSynth-Studio/Qwen-Image-EliGen", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-EliGen/model.safetensors")
+
+# example 1
+global_prompt = "A breathtaking beauty of Raja Ampat by the late-night moonlight , one beautiful woman from behind wearing a pale blue long dress with soft glow, sitting at the top of a cliff looking towards the beach,pastell light colors, a group of small distant birds flying in far sky, a boat sailing on the sea, best quality, realistic, whimsical, fantastic, splash art, intricate detailed, hyperdetailed, maximalist style, photorealistic, concept art, sharp focus, harmony, serenity, tranquility, soft pastell colors,ambient occlusion, cozy ambient lighting, masterpiece, liiv1, linquivera, metix, mentixis, masterpiece, award winning, view from above\n"
+entity_prompts = ["cliff", "sea", "moon", "sailing boat", "a seated beautiful woman", "pale blue long dress with soft glow"]
+example(pipe, [0], 1, global_prompt, entity_prompts)
+
+# example 2
+global_prompt = "samurai girl wearing a kimono, she's holding a sword glowing with red flame, her long hair is flowing in the wind, she is looking at a small bird perched on the back of her hand. ultra realist style. maximum image detail. maximum realistic render."
+entity_prompts = ["flowing hair", "sword glowing with red flame", "A cute bird", "yellow belt"]
+example(pipe, [0], 2, global_prompt, entity_prompts)
+
+# example 3
+global_prompt = "Image of a neverending staircase up to a mysterious palace in the sky, The ancient palace stood majestically atop a mist-shrouded mountain, sunrise, two traditional monk walk in the stair looking at the sunrise, fog,see-through, best quality, whimsical, fantastic, splash art, intricate detailed, hyperdetailed, photorealistic, concept art, harmony, serenity, tranquility, ambient occlusion, halation, cozy ambient lighting, dynamic lighting,masterpiece, liiv1, linquivera, metix, mentixis, masterpiece, award winning,"
+entity_prompts = ["ancient palace", "stone staircase with railings", "a traditional monk", "a traditional monk"]
+example(pipe, [27], 3, global_prompt, entity_prompts)
+
+# example 4
+global_prompt = "A beautiful girl wearing shirt and shorts in the street, holding a sign 'Entity Control'"
+entity_prompts = ["A beautiful girl", "sign 'Entity Control'", "shorts", "shirt"]
+example(pipe, [21], 4, global_prompt, entity_prompts)
+
+# example 5
+global_prompt = "A captivating, dramatic scene in a painting that exudes mystery and foreboding. A white sky, swirling blue clouds, and a crescent yellow moon illuminate a solitary woman standing near the water's edge. Her long dress flows in the wind, silhouetted against the eerie glow. The water mirrors the fiery sky and moonlight, amplifying the uneasy atmosphere."
+entity_prompts = ["crescent yellow moon", "a solitary woman", "water", "swirling blue clouds"]
+example(pipe, [0], 5, global_prompt, entity_prompts)
+
+# example 7, same prompt with different seeds
+seeds = range(5, 9)
+global_prompt = "A beautiful asia woman wearing white dress, holding a mirror, with a forest background."
+entity_prompts = ["A beautiful woman", "mirror", "necklace", "glasses", "earring", "white dress", "jewelry headpiece"]
+example(pipe, seeds, 7, global_prompt, entity_prompts)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-In-Context-Control-Union.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-In-Context-Control-Union.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a27ac019162add40de54f0b62863a5249d5c989
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image-In-Context-Control-Union.py
@@ -0,0 +1,35 @@
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download, snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth.controlnets.processors import Annotator
+
+allow_file_pattern = ["sk_model.pth", "sk_model2.pth", "dpt_hybrid-midas-501f0c75.pt", "ControlNetHED.pth", "body_pose_model.pth", "hand_pose_model.pth", "facenet.pth", "scannet.pt"]
+snapshot_download("lllyasviel/Annotators", local_dir="models/Annotators", allow_file_pattern=allow_file_pattern)
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+snapshot_download("DiffSynth-Studio/Qwen-Image-In-Context-Control-Union", local_dir="models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union/model.safetensors")
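+# In-context control: the structural condition is passed as context_image and the prompt is prefixed with
+# "Context_Control. ", which is how this LoRA appears to be used in the loop below.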
+
+dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern="data/examples/qwen-image-context-control/image.jpg")
+origin_image = Image.open("data/examples/qwen-image-context-control/image.jpg").resize((1024, 1024))
+annotator_ids = ['openpose', 'canny', 'depth', 'lineart', 'softedge', 'normal']
+for annotator_id in annotator_ids:
+ annotator = Annotator(processor_id=annotator_id, device="cuda")
+ control_image = annotator(origin_image)
+ control_image.save(f"{annotator.processor_id}.png")
+
+ control_prompt = "Context_Control. "
+ prompt = f"{control_prompt}一个穿着淡蓝色的漂亮女孩正在翩翩起舞,背景是梦幻的星空,光影交错,细节精致。"
+ negative_prompt = "网格化,规则的网格,模糊, 低分辨率, 低质量, 变形, 畸形, 错误的解剖学, 变形的手, 变形的身体, 变形的脸, 变形的头发, 变形的眼睛, 变形的嘴巴"
+ image = pipe(prompt, seed=1, negative_prompt=negative_prompt, context_image=control_image, height=1024, width=1024)
+ image.save(f"image_{annotator.processor_id}.png")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image.py
new file mode 100644
index 0000000000000000000000000000000000000000..275cfba8c33db6971b87493f2966b6b235744588
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference/Qwen-Image.py
@@ -0,0 +1,17 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=40)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py
new file mode 100644
index 0000000000000000000000000000000000000000..e73d62905c4eca3424a878b8c35ae9933d0942fb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Canny.py
@@ -0,0 +1,32 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
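+# Low-VRAM setup: weights are parked on the CPU in FP8 (offload_device="cpu", offload_dtype=torch.float8_e4m3fn)
+# and enable_vram_management() below moves them onto the GPU only while they are needed (our reading of these options).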
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny", origin_file_pattern="model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="canny/image_1.jpg"
+)
+controlnet_image = Image.open("data/example_image_dataset/canny/image_1.jpg").resize((1328, 1328))
+
+prompt = "一只小狗,毛发光洁柔顺,眼神灵动,背景是樱花纷飞的春日庭院,唯美温馨。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..601fb3e7151dd0930e30e41b1403a454ba4d8892
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Depth.py
@@ -0,0 +1,33 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth", origin_file_pattern="model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="depth/image_1.jpg"
+)
+
+controlnet_image = Image.open("data/example_image_dataset/depth/image_1.jpg").resize((1328, 1328))
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py
new file mode 100644
index 0000000000000000000000000000000000000000..098993238a994ef17c2de337743228ac3225e8ff
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Blockwise-ControlNet-Inpaint.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from modelscope import dataset_snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint", origin_file_pattern="model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="inpaint/*.jpg"
+)
+prompt = "a cat with sunglasses"
+controlnet_image = Image.open("./data/example_image_dataset/inpaint/image_1.jpg").convert("RGB").resize((1328, 1328))
+inpaint_mask = Image.open("./data/example_image_dataset/inpaint/mask.jpg").convert("RGB").resize((1328, 1328))
+image = pipe(
+ prompt, seed=0,
+ input_image=controlnet_image, inpaint_mask=inpaint_mask,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image, inpaint_mask=inpaint_mask)],
+ num_inference_steps=40,
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-Full.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-Full.py
new file mode 100644
index 0000000000000000000000000000000000000000..0839dd0c1806b4500ed3a6c5a408ae826a2ffa07
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-Full.py
@@ -0,0 +1,18 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Distill-Full", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-LoRA.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-LoRA.py
new file mode 100644
index 0000000000000000000000000000000000000000..a058c02177d152e68c23cec82024e2bd747719c3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Distill-LoRA.py
@@ -0,0 +1,22 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from modelscope import snapshot_download
+import torch
+
+# Do not use float8 offloading (offload_dtype=torch.float8_e4m3fn) with this model; keep the weights in bfloat16.
+snapshot_download("DiffSynth-Studio/Qwen-Image-Distill-LoRA", local_dir="models/DiffSynth-Studio/Qwen-Image-Distill-LoRA")
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-Distill-LoRA/model.safetensors")
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py
new file mode 100644
index 0000000000000000000000000000000000000000..55c771f8f928b0101b43492303fd7210ac891f57
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit-Lowres-Fix.py
@@ -0,0 +1,28 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+from modelscope import snapshot_download
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
+pipe.enable_vram_management()
+
+snapshot_download("DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix", local_dir="models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-Edit-Lowres-Fix/model.safetensors")
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt=prompt, seed=0, num_inference_steps=40, height=1024, width=768)
+image.save("image.jpg")
+
+prompt = "将裙子变成粉色"
+image = image.resize((512, 384))
+image = pipe(prompt, edit_image=image, seed=1, num_inference_steps=40, height=1024, width=768, edit_rope_interpolation=True, edit_image_auto_resize=False)
+image.save(f"image2.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cc79fd3b7a538c2621b4a1727939e09e3ff94b3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-Edit.py
@@ -0,0 +1,23 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
+pipe.enable_vram_management()
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt=prompt, seed=0, num_inference_steps=40, height=1024, width=1024)
+image.save("image1.jpg")
+
+prompt = "将裙子改为粉色"
+image = pipe(prompt, edit_image=image, seed=1, num_inference_steps=40, height=1024, width=1024)
+image.save(f"image2.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-V2.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-V2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f51fd47cb3c077ecd859b51c40bddcb42c37dc49
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen-V2.py
@@ -0,0 +1,108 @@
+import torch
+import random
+from PIL import Image, ImageDraw, ImageFont
+from modelscope import dataset_snapshot_download, snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+
+
+def visualize_masks(image, masks, mask_prompts, output_path, font_size=35, use_random_colors=False):
+ # Create a blank image for overlays
+ overlay = Image.new('RGBA', image.size, (0, 0, 0, 0))
+
+ colors = [
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ ]
+ # Generate random colors for each mask
+ if use_random_colors:
+ colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 80) for _ in range(len(masks))]
+
+ # Font settings
+ try:
+ font = ImageFont.truetype("wqy-zenhei.ttc", font_size) # Adjust as needed
+ except IOError:
+ font = ImageFont.load_default(font_size)
+
+ # Overlay each mask onto the overlay image
+ for mask, mask_prompt, color in zip(masks, mask_prompts, colors):
+ # Convert mask to RGBA mode
+ mask_rgba = mask.convert('RGBA')
+ mask_data = mask_rgba.getdata()
+ new_data = [(color if item[:3] == (255, 255, 255) else (0, 0, 0, 0)) for item in mask_data]
+ mask_rgba.putdata(new_data)
+
+ # Draw the mask prompt text on the mask
+ draw = ImageDraw.Draw(mask_rgba)
+ mask_bbox = mask.getbbox() # Get the bounding box of the mask
+ text_position = (mask_bbox[0] + 10, mask_bbox[1] + 10) # Adjust text position based on mask position
+ draw.text(text_position, mask_prompt, fill=(255, 255, 255, 255), font=font)
+
+ # Alpha composite the overlay with this mask
+ overlay = Image.alpha_composite(overlay, mask_rgba)
+
+ # Composite the overlay onto the original image
+ result = Image.alpha_composite(image.convert('RGBA'), overlay)
+
+ # Save or display the resulting image
+ result.save(output_path)
+
+ return result
+
+def example(pipe, seeds, example_id, global_prompt, entity_prompts):
+ dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern=f"data/examples/eligen/qwen-image/example_{example_id}/*.png")
+ masks = [Image.open(f"./data/examples/eligen/qwen-image/example_{example_id}/{i}.png").convert('RGB').resize((1024, 1024)) for i in range(len(entity_prompts))]
+ negative_prompt = "网格化,规则的网格,模糊, 低分辨率, 低质量, 变形, 畸形, 错误的解剖学, 变形的手, 变形的身体, 变形的脸, 变形的头发, 变形的眼睛, 变形的嘴巴"
+ for seed in seeds:
+ # generate image
+ image = pipe(
+ prompt=global_prompt,
+ cfg_scale=4.0,
+ negative_prompt=negative_prompt,
+ num_inference_steps=40,
+ seed=seed,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks,
+ )
+ image.save(f"eligen_example_{example_id}_{seed}.png")
+ visualize_masks(image, masks, entity_prompts, f"eligen_example_{example_id}_mask_{seed}.png")
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+snapshot_download("DiffSynth-Studio/Qwen-Image-EliGen-V2", local_dir="models/DiffSynth-Studio/Qwen-Image-EliGen-V2", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-EliGen-V2/model.safetensors")
+
+seeds = [0]
+
+global_prompt = "写实摄影风格. A beautiful asia woman wearing white dress, she is holding a mirror with her right arm, with a beach background."
+entity_prompts = ["A beautiful woman", "mirror", "necklace", "glasses", "earring", "white dress", "jewelry headpiece"]
+example(pipe, seeds, 7, global_prompt, entity_prompts)
+
+global_prompt = "写实摄影风格, 细节丰富。街头一位漂亮的女孩,穿着衬衫和短裤,手持写有“实体控制”的标牌,背景是繁忙的城市街道,阳光明媚,行人匆匆。"
+entity_prompts = ["一个漂亮的女孩", "标牌 '实体控制'", "短裤", "衬衫"]
+example(pipe, seeds, 4, global_prompt, entity_prompts)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen.py
new file mode 100644
index 0000000000000000000000000000000000000000..b13c57dfd6fd7a719da11aafb424b4d6817ba6ab
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-EliGen.py
@@ -0,0 +1,129 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+from PIL import Image, ImageDraw, ImageFont
+from modelscope import dataset_snapshot_download, snapshot_download
+import random
+
+
+def visualize_masks(image, masks, mask_prompts, output_path, font_size=35, use_random_colors=False):
+ # Create a blank image for overlays
+ overlay = Image.new('RGBA', image.size, (0, 0, 0, 0))
+
+ colors = [
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ (165, 238, 173, 80),
+ (76, 102, 221, 80),
+ (221, 160, 77, 80),
+ (204, 93, 71, 80),
+ (145, 187, 149, 80),
+ (134, 141, 172, 80),
+ (157, 137, 109, 80),
+ (153, 104, 95, 80),
+ ]
+ # Generate random colors for each mask
+ if use_random_colors:
+ colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 80) for _ in range(len(masks))]
+
+ # Font settings
+ try:
+ font = ImageFont.truetype("wqy-zenhei.ttc", font_size) # Adjust as needed
+ except IOError:
+ font = ImageFont.load_default(font_size)
+
+ # Overlay each mask onto the overlay image
+ for mask, mask_prompt, color in zip(masks, mask_prompts, colors):
+ # Convert mask to RGBA mode
+ mask_rgba = mask.convert('RGBA')
+ mask_data = mask_rgba.getdata()
+ new_data = [(color if item[:3] == (255, 255, 255) else (0, 0, 0, 0)) for item in mask_data]
+ mask_rgba.putdata(new_data)
+
+ # Draw the mask prompt text on the mask
+ draw = ImageDraw.Draw(mask_rgba)
+ mask_bbox = mask.getbbox() # Get the bounding box of the mask
+ text_position = (mask_bbox[0] + 10, mask_bbox[1] + 10) # Adjust text position based on mask position
+ draw.text(text_position, mask_prompt, fill=(255, 255, 255, 255), font=font)
+
+ # Alpha composite the overlay with this mask
+ overlay = Image.alpha_composite(overlay, mask_rgba)
+
+ # Composite the overlay onto the original image
+ result = Image.alpha_composite(image.convert('RGBA'), overlay)
+
+ # Save or display the resulting image
+ result.save(output_path)
+
+ return result
+
+def example(pipe, seeds, example_id, global_prompt, entity_prompts):
+ dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern=f"data/examples/eligen/qwen-image/example_{example_id}/*.png")
+ masks = [Image.open(f"./data/examples/eligen/qwen-image/example_{example_id}/{i}.png").convert('RGB') for i in range(len(entity_prompts))]
+ negative_prompt = ""
+ for seed in seeds:
+ # generate image
+ image = pipe(
+ prompt=global_prompt,
+ cfg_scale=4.0,
+ negative_prompt=negative_prompt,
+ num_inference_steps=30,
+ seed=seed,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks,
+ )
+ image.save(f"eligen_example_{example_id}_{seed}.png")
+ visualize_masks(image, masks, entity_prompts, f"eligen_example_{example_id}_mask_{seed}.png")
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+snapshot_download("DiffSynth-Studio/Qwen-Image-EliGen", local_dir="models/DiffSynth-Studio/Qwen-Image-EliGen", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-EliGen/model.safetensors")
+
+# example 1
+global_prompt = "A breathtaking beauty of Raja Ampat by the late-night moonlight , one beautiful woman from behind wearing a pale blue long dress with soft glow, sitting at the top of a cliff looking towards the beach,pastell light colors, a group of small distant birds flying in far sky, a boat sailing on the sea, best quality, realistic, whimsical, fantastic, splash art, intricate detailed, hyperdetailed, maximalist style, photorealistic, concept art, sharp focus, harmony, serenity, tranquility, soft pastell colors,ambient occlusion, cozy ambient lighting, masterpiece, liiv1, linquivera, metix, mentixis, masterpiece, award winning, view from above\n"
+entity_prompts = ["cliff", "sea", "moon", "sailing boat", "a seated beautiful woman", "pale blue long dress with soft glow"]
+example(pipe, [0], 1, global_prompt, entity_prompts)
+
+# example 2
+global_prompt = "samurai girl wearing a kimono, she's holding a sword glowing with red flame, her long hair is flowing in the wind, she is looking at a small bird perched on the back of her hand. ultra realist style. maximum image detail. maximum realistic render."
+entity_prompts = ["flowing hair", "sword glowing with red flame", "A cute bird", "yellow belt"]
+example(pipe, [0], 2, global_prompt, entity_prompts)
+
+# example 3
+global_prompt = "Image of a neverending staircase up to a mysterious palace in the sky, The ancient palace stood majestically atop a mist-shrouded mountain, sunrise, two traditional monk walk in the stair looking at the sunrise, fog,see-through, best quality, whimsical, fantastic, splash art, intricate detailed, hyperdetailed, photorealistic, concept art, harmony, serenity, tranquility, ambient occlusion, halation, cozy ambient lighting, dynamic lighting,masterpiece, liiv1, linquivera, metix, mentixis, masterpiece, award winning,"
+entity_prompts = ["ancient palace", "stone staircase with railings", "a traditional monk", "a traditional monk"]
+example(pipe, [27], 3, global_prompt, entity_prompts)
+
+# example 4
+global_prompt = "A beautiful girl wearing shirt and shorts in the street, holding a sign 'Entity Control'"
+entity_prompts = ["A beautiful girl", "sign 'Entity Control'", "shorts", "shirt"]
+example(pipe, [21], 4, global_prompt, entity_prompts)
+
+# example 5
+global_prompt = "A captivating, dramatic scene in a painting that exudes mystery and foreboding. A white sky, swirling blue clouds, and a crescent yellow moon illuminate a solitary woman standing near the water's edge. Her long dress flows in the wind, silhouetted against the eerie glow. The water mirrors the fiery sky and moonlight, amplifying the uneasy atmosphere."
+entity_prompts = ["crescent yellow moon", "a solitary woman", "water", "swirling blue clouds"]
+example(pipe, [0], 5, global_prompt, entity_prompts)
+
+# example 7, same prompt with different seeds
+seeds = range(5, 9)
+global_prompt = "A beautiful asia woman wearing white dress, holding a mirror, with a forest background."
+entity_prompts = ["A beautiful woman", "mirror", "necklace", "glasses", "earring", "white dress", "jewelry headpiece"]
+example(pipe, seeds, 7, global_prompt, entity_prompts)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py
new file mode 100644
index 0000000000000000000000000000000000000000..9faced855a0a2250f06a8bb4073733445e7ddac4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image-In-Context-Control-Union.py
@@ -0,0 +1,36 @@
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download, snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth.controlnets.processors import Annotator
+
+allow_file_pattern = ["sk_model.pth", "sk_model2.pth", "dpt_hybrid-midas-501f0c75.pt", "ControlNetHED.pth", "body_pose_model.pth", "hand_pose_model.pth", "facenet.pth", "scannet.pt"]
+snapshot_download("lllyasviel/Annotators", local_dir="models/Annotators", allow_file_pattern=allow_file_pattern)
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+snapshot_download("DiffSynth-Studio/Qwen-Image-In-Context-Control-Union", local_dir="models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union", allow_file_pattern="model.safetensors")
+pipe.load_lora(pipe.dit, "models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union/model.safetensors")
+
+dataset_snapshot_download(dataset_id="DiffSynth-Studio/examples_in_diffsynth", local_dir="./", allow_file_pattern="data/examples/qwen-image-context-control/image.jpg")
+origin_image = Image.open("data/examples/qwen-image-context-control/image.jpg").resize((1024, 1024))
+annotator_ids = ['openpose', 'canny', 'depth', 'lineart', 'softedge', 'normal']
+for annotator_id in annotator_ids:
+ annotator = Annotator(processor_id=annotator_id, device="cuda")
+ control_image = annotator(origin_image)
+ control_image.save(f"{annotator.processor_id}.png")
+
+ control_prompt = "Context_Control. "
+ prompt = f"{control_prompt}一个穿着淡蓝色的漂亮女孩正在翩翩起舞,背景是梦幻的星空,光影交错,细节精致。"
+ negative_prompt = "网格化,规则的网格,模糊, 低分辨率, 低质量, 变形, 畸形, 错误的解剖学, 变形的手, 变形的身体, 变形的脸, 变形的头发, 变形的眼睛, 变形的嘴巴"
+ image = pipe(prompt, seed=1, negative_prompt=negative_prompt, context_image=control_image, height=1024, width=1024)
+ image.save(f"image_{annotator.processor_id}.png")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ecc11f9fb06100eed63f919ede1c8c5b0640c56
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_inference_low_vram/Qwen-Image.py
@@ -0,0 +1,18 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.enable_vram_management()
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(prompt, seed=0, num_inference_steps=40)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f563fd155b2a9f71bb0f11e4657acd9dec0219bb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Canny.sh
@@ -0,0 +1,38 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_canny.csv \
+ --data_file_keys "image,blockwise_controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Canny_full" \
+ --trainable_models "blockwise_controlnet" \
+ --extra_inputs "blockwise_controlnet_image" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
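+# remove_prefix_in_ckpt strips "pipe.blockwise_controlnet.models.0." from the saved state dict so the checkpoint
+# can be loaded as a standalone Blockwise ControlNet (our reading of the flag; see train.py for the exact behavior).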
+
+# If you want to pre-train a Blockwise ControlNet from scratch,
+# first run the following script to generate the initialized model weights,
+# then start training with a higher learning rate (1e-3).
+
+# python examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py
+
+# accelerate launch examples/qwen_image/model_training/train.py \
+# --dataset_base_path data/example_image_dataset \
+# --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_canny.csv \
+# --data_file_keys "image,blockwise_controlnet_image" \
+# --max_pixels 1048576 \
+# --dataset_repeat 50 \
+# --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+# --model_paths '["models/blockwise_controlnet.safetensors"]' \
+# --learning_rate 1e-3 \
+# --num_epochs 2 \
+# --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+# --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Canny_full" \
+# --trainable_models "blockwise_controlnet" \
+# --extra_inputs "blockwise_controlnet_image" \
+# --use_gradient_checkpointing \
+# --find_unused_parameters
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2bd2926157aa1eb6333d3378a27613e332452f5a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Depth.sh
@@ -0,0 +1,38 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_depth.csv \
+ --data_file_keys "image,blockwise_controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Depth_full" \
+ --trainable_models "blockwise_controlnet" \
+ --extra_inputs "blockwise_controlnet_image" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
+
+# If you want to pre-train a Blockwise ControlNet from scratch,
+# first run the following script to generate the initialized model weights,
+# then start training with a higher learning rate (1e-3).
+
+# python examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py
+
+# accelerate launch examples/qwen_image/model_training/train.py \
+# --dataset_base_path data/example_image_dataset \
+# --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_depth.csv \
+# --data_file_keys "image,blockwise_controlnet_image" \
+# --max_pixels 1048576 \
+# --dataset_repeat 50 \
+# --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+# --model_paths '["models/blockwise_controlnet.safetensors"]' \
+# --learning_rate 1e-3 \
+# --num_epochs 2 \
+# --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+# --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Depth_full" \
+# --trainable_models "blockwise_controlnet" \
+# --extra_inputs "blockwise_controlnet_image" \
+# --use_gradient_checkpointing \
+# --find_unused_parameters
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b87552bc2fb8f9e1a3a1086a3134129bd8017de3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
@@ -0,0 +1,38 @@
+accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config.yaml examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_inpaint.csv \
+ --data_file_keys "image,blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Inpaint_full" \
+ --trainable_models "blockwise_controlnet" \
+ --extra_inputs "blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
+
+# If you want to pre-train an Inpaint Blockwise ControlNet from scratch,
+# first run the following script to generate the initialized model weights,
+# then start training with a higher learning rate (1e-3).
+
+# python examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py
+
+# accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config.yaml examples/qwen_image/model_training/train.py \
+# --dataset_base_path data/example_image_dataset \
+# --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_inpaint.csv \
+# --data_file_keys "image,blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+# --max_pixels 1048576 \
+# --dataset_repeat 50 \
+# --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+# --model_paths '["models/blockwise_controlnet_inpaint.safetensors"]' \
+# --learning_rate 1e-3 \
+# --num_epochs 2 \
+# --remove_prefix_in_ckpt "pipe.blockwise_controlnet.models.0." \
+# --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Inpaint_full" \
+# --trainable_models "blockwise_controlnet" \
+# --extra_inputs "blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+# --use_gradient_checkpointing \
+# --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Distill-Full.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Distill-Full.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a56fe9d2b409f3dd8e80a064b2dc63f115f32dfc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Distill-Full.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Qwen-Image-Distill-Full:diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Distill-Full_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Edit.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Edit.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ec257654ee3a73834e2be3c78f34907ab9218fb3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image-Edit.sh
@@ -0,0 +1,15 @@
+accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_edit.csv \
+ --data_file_keys "image,edit_image" \
+ --extra_inputs "edit_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image-Edit:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Edit_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image.sh
new file mode 100644
index 0000000000000000000000000000000000000000..979101e62c26e41ec408a0cbf188da5b35f3d50d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/Qwen-Image.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config.yaml b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83280f73f315a32eccb065f351d66b4b2678759d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config.yaml
@@ -0,0 +1,22 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ offload_optimizer_device: none
+ offload_param_device: none
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a75f3d91eeae160409650b482e5383ac26b297b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/full/accelerate_config_zero2offload.yaml
@@ -0,0 +1,22 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ offload_optimizer_device: 'cpu'
+ offload_param_device: 'cpu'
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh
new file mode 100644
index 0000000000000000000000000000000000000000..226313466dc061481dc3e1ea427ed5f2ef8a63f1
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Canny.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_canny.csv \
+ --data_file_keys "image,blockwise_controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Canny_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --extra_inputs "blockwise_controlnet_image" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh
new file mode 100644
index 0000000000000000000000000000000000000000..60d3ca3f8a331369092bec762f7a64ef39d5550d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Depth.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_depth.csv \
+ --data_file_keys "image,blockwise_controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Depth_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --extra_inputs "blockwise_controlnet_image" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
new file mode 100644
index 0000000000000000000000000000000000000000..853ffe2667421ffdbc949e3809907e71f0f8f9dc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Blockwise-ControlNet-Inpaint.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_blockwise_controlnet_inpaint.csv \
+ --data_file_keys "image,blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors,DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Blockwise-ControlNet-Inpaint_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --extra_inputs "blockwise_controlnet_image,blockwise_controlnet_inpaint_mask" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Distill-Full.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Distill-Full.sh
new file mode 100644
index 0000000000000000000000000000000000000000..79d7c376de52ffc9268be3eb7eb3ef64f5f9956d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Distill-Full.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Qwen-Image-Distill-Full:diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Distill-Full_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Edit.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Edit.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0662b1e38bdb078fb4ec0fa818ee7846f43dfb2d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-Edit.sh
@@ -0,0 +1,18 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_edit.csv \
+ --data_file_keys "image,edit_image" \
+ --extra_inputs "edit_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image-Edit:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-Edit_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --use_gradient_checkpointing \
+ --dataset_num_workers 8 \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-EliGen.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-EliGen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..af861e669116df4759a9d146198c69736b732095
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-EliGen.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path "data/example_image_dataset" \
+ --dataset_metadata_path data/example_image_dataset/metadata_eligen.json \
+ --data_file_keys "image,eligen_entity_masks" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-EliGen_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --extra_inputs "eligen_entity_masks,eligen_entity_prompts" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-In-Context-Control-Union.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-In-Context-Control-Union.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d241ad0e33e734580d23d2759e0f1050afe5e1f2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image-In-Context-Control-Union.sh
@@ -0,0 +1,20 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path "data/example_image_dataset" \
+ --dataset_metadata_path data/example_image_dataset/metadata_qwenimage_context.csv \
+ --data_file_keys "image,context_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image-In-Context-Control-Union_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 64 \
+ --lora_checkpoint "models/DiffSynth-Studio/Qwen-Image-In-Context-Control-Union/model.safetensors" \
+ --extra_inputs "context_image" \
+ --use_gradient_checkpointing \
+ --find_unused_parameters
+
+# If you want to train from scratch, remove the --lora_checkpoint argument, as in the commented-out command below.
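+
+# The from-scratch variant below is a sketch: the same command with --lora_checkpoint removed,
+# keeping all other arguments unchanged (an assumption, not a tuned recipe).
+
+# accelerate launch examples/qwen_image/model_training/train.py \
+# --dataset_base_path "data/example_image_dataset" \
+# --dataset_metadata_path data/example_image_dataset/metadata_qwenimage_context.csv \
+# --data_file_keys "image,context_image" \
+# --max_pixels 1048576 \
+# --dataset_repeat 50 \
+# --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+# --learning_rate 1e-4 \
+# --num_epochs 5 \
+# --remove_prefix_in_ckpt "pipe.dit." \
+# --output_path "./models/train/Qwen-Image-In-Context-Control-Union_lora" \
+# --lora_base_model "dit" \
+# --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+# --lora_rank 64 \
+# --extra_inputs "context_image" \
+# --use_gradient_checkpointing \
+# --find_unused_parameters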
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f1198a5f676f7bee45d0726e299e16c8f8a1456b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/lora/Qwen-Image.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/qwen_image/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen-Image:transformer/diffusion_pytorch_model*.safetensors,Qwen/Qwen-Image:text_encoder/model*.safetensors,Qwen/Qwen-Image:vae/diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Qwen-Image_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "to_q,to_k,to_v,add_q_proj,add_k_proj,add_v_proj,to_out.0,to_add_out,img_mlp.net.2,img_mod.1,txt_mlp.net.2,txt_mod.1" \
+ --lora_rank 32 \
+ --use_gradient_checkpointing \
+ --dataset_num_workers 8 \
+ --find_unused_parameters
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b0392f589d7739197ba4251bb1c6bfb0fcfe498
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Initialize.py
@@ -0,0 +1,13 @@
+# This script is for initializing a Qwen-Image-Blockwise-ControlNet
+from diffsynth import hash_state_dict_keys
+from diffsynth.models.qwen_image_controlnet import QwenImageBlockWiseControlNet
+import torch
+from safetensors.torch import save_file
+
+
+controlnet = QwenImageBlockWiseControlNet().to(dtype=torch.bfloat16, device="cuda")
+controlnet.init_weight()
+state_dict_controlnet = controlnet.state_dict()
+
+print(hash_state_dict_keys(state_dict_controlnet))
+save_file(state_dict_controlnet, "models/blockwise_controlnet.safetensors")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..83111894709eda93fbab925ca1804e2a3fd477dd
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/scripts/Qwen-Image-Blockwise-ControlNet-Inpaint-Initialize.py
@@ -0,0 +1,12 @@
+# This script is for initializing an Inpaint Qwen-Image-Blockwise-ControlNet
+import torch
+from diffsynth import hash_state_dict_keys
+from diffsynth.models.qwen_image_controlnet import QwenImageBlockWiseControlNet
+from safetensors.torch import save_file
+
+controlnet = QwenImageBlockWiseControlNet(additional_in_dim=4).to(dtype=torch.bfloat16, device="cuda")
+controlnet.init_weight()
+state_dict_controlnet = controlnet.state_dict()
+
+print(hash_state_dict_keys(state_dict_controlnet))
+save_file(state_dict_controlnet, "models/blockwise_controlnet_inpaint.safetensors")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/train.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee6752da1d415b5c949ac9ed9a48de94caddb794
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/train.py
@@ -0,0 +1,148 @@
+import torch, os, json
+from diffsynth import load_state_dict
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth.pipelines.flux_image_new import ControlNetInput
+from diffsynth.trainers.utils import DiffusionTrainingModule, ImageDataset, ModelLogger, launch_training_task, qwen_image_parser
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+
+class QwenImageTrainingModule(DiffusionTrainingModule):
+ def __init__(
+ self,
+ model_paths=None, model_id_with_origin_paths=None,
+ tokenizer_path=None, processor_path=None,
+ trainable_models=None,
+ lora_base_model=None, lora_target_modules="", lora_rank=32, lora_checkpoint=None,
+ use_gradient_checkpointing=True,
+ use_gradient_checkpointing_offload=False,
+ extra_inputs=None,
+ enable_fp8_training=False,
+ ):
+ super().__init__()
+ # Load models
+ offload_dtype = torch.float8_e4m3fn if enable_fp8_training else None
+ model_configs = []
+ if model_paths is not None:
+ model_paths = json.loads(model_paths)
+ model_configs += [ModelConfig(path=path, offload_dtype=offload_dtype) for path in model_paths]
+ if model_id_with_origin_paths is not None:
+ model_id_with_origin_paths = model_id_with_origin_paths.split(",")
+ model_configs += [ModelConfig(model_id=i.split(":")[0], origin_file_pattern=i.split(":")[1], offload_dtype=offload_dtype) for i in model_id_with_origin_paths]
+
+ tokenizer_config = ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/") if tokenizer_path is None else ModelConfig(tokenizer_path)
+ processor_config = ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/") if processor_path is None else ModelConfig(processor_path)
+ self.pipe = QwenImagePipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs, tokenizer_config=tokenizer_config, processor_config=processor_config)
+
+ # Enable FP8
+ if enable_fp8_training:
+ self.pipe._enable_fp8_lora_training(torch.float8_e4m3fn)
+
+ # Reset training scheduler (do it in each training step)
+ self.pipe.scheduler.set_timesteps(1000, training=True)
+
+ # Freeze untrainable models
+ self.pipe.freeze_except([] if trainable_models is None else trainable_models.split(","))
+
+ # Add LoRA to the base models
+ if lora_base_model is not None:
+ model = self.add_lora_to_model(
+ getattr(self.pipe, lora_base_model),
+ target_modules=lora_target_modules.split(","),
+ lora_rank=lora_rank,
+ upcast_dtype=self.pipe.torch_dtype,
+ )
+ if lora_checkpoint is not None:
+ state_dict = load_state_dict(lora_checkpoint)
+ state_dict = self.mapping_lora_state_dict(state_dict)
+ load_result = model.load_state_dict(state_dict, strict=False)
+ print(f"LoRA checkpoint loaded: {lora_checkpoint}, total {len(state_dict)} keys")
+ if len(load_result[1]) > 0:
+ print(f"Warning, LoRA key mismatch! Unexpected keys in LoRA checkpoint: {load_result[1]}")
+ setattr(self.pipe, lora_base_model, model)
+
+ # Store other configs
+ self.use_gradient_checkpointing = use_gradient_checkpointing
+ self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload
+ self.extra_inputs = extra_inputs.split(",") if extra_inputs is not None else []
+
+
+ def forward_preprocess(self, data):
+ # CFG-sensitive parameters
+ inputs_posi = {"prompt": data["prompt"]}
+ inputs_nega = {"negative_prompt": ""}
+
+ # CFG-insensitive parameters
+ inputs_shared = {
+ # Fill in the input parameters as if you were running this pipeline for inference.
+ "input_image": data["image"],
+ "height": data["image"].size[1],
+ "width": data["image"].size[0],
+ # Please do not modify the following parameters
+ # unless you clearly understand their effects.
+ "cfg_scale": 1,
+ "rand_device": self.pipe.device,
+ "use_gradient_checkpointing": self.use_gradient_checkpointing,
+ "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload,
+ "edit_image_auto_resize": True,
+ }
+
+ # Extra inputs
+ controlnet_input, blockwise_controlnet_input = {}, {}
+ for extra_input in self.extra_inputs:
+ if extra_input.startswith("blockwise_controlnet_"):
+ blockwise_controlnet_input[extra_input.replace("blockwise_controlnet_", "")] = data[extra_input]
+ elif extra_input.startswith("controlnet_"):
+ controlnet_input[extra_input.replace("controlnet_", "")] = data[extra_input]
+ else:
+ inputs_shared[extra_input] = data[extra_input]
+ if len(controlnet_input) > 0:
+ inputs_shared["controlnet_inputs"] = [ControlNetInput(**controlnet_input)]
+ if len(blockwise_controlnet_input) > 0:
+ inputs_shared["blockwise_controlnet_inputs"] = [ControlNetInput(**blockwise_controlnet_input)]
+
+ # Pipeline units will automatically process the input parameters.
+ for unit in self.pipe.units:
+ inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega)
+ return {**inputs_shared, **inputs_posi}
+
+
+ def forward(self, data, inputs=None):
+ if inputs is None: inputs = self.forward_preprocess(data)
+ models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models}
+ loss = self.pipe.training_loss(**models, **inputs)
+ return loss
+
+
+
+if __name__ == "__main__":
+ parser = qwen_image_parser()
+ args = parser.parse_args()
+ dataset = ImageDataset(args=args)
+ model = QwenImageTrainingModule(
+ model_paths=args.model_paths,
+ model_id_with_origin_paths=args.model_id_with_origin_paths,
+ tokenizer_path=args.tokenizer_path,
+ processor_path=args.processor_path,
+ trainable_models=args.trainable_models,
+ lora_base_model=args.lora_base_model,
+ lora_target_modules=args.lora_target_modules,
+ lora_rank=args.lora_rank,
+ lora_checkpoint=args.lora_checkpoint,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload,
+ extra_inputs=args.extra_inputs,
+ enable_fp8_training=args.enable_fp8_training,
+ )
+ model_logger = ModelLogger(args.output_path, remove_prefix_in_ckpt=args.remove_prefix_in_ckpt)
+ optimizer = torch.optim.AdamW(model.trainable_modules(), lr=args.learning_rate, weight_decay=args.weight_decay)
+ scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
+ launch_training_task(
+ dataset, model, model_logger, optimizer, scheduler,
+ num_epochs=args.num_epochs,
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
+ save_steps=args.save_steps,
+ find_unused_parameters=args.find_unused_parameters,
+ num_workers=args.dataset_num_workers,
+ )
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ae4d5bb72444f8d7550dc1856a2a0275b224ed9
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Canny.py
@@ -0,0 +1,31 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(path="models/train/Qwen-Image-Blockwise-ControlNet-Canny_full/epoch-1.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="canny/image_1.jpg"
+)
+controlnet_image = Image.open("data/example_image_dataset/canny/image_1.jpg").resize((1328, 1328))
+
+prompt = "一只小狗,毛发光洁柔顺,眼神灵动,背景是樱花纷飞的春日庭院,唯美温馨。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..18b597e1fa7508127c431aa4a466c86392a83044
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Depth.py
@@ -0,0 +1,31 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(path="models/train/Qwen-Image-Blockwise-ControlNet-Depth_full/epoch-1.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="depth/image_1.jpg"
+)
+controlnet_image = Image.open("data/example_image_dataset/depth/image_1.jpg").resize((1328, 1328))
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py
new file mode 100644
index 0000000000000000000000000000000000000000..15a15b4df8bd9608f8ce6782a309d37699b923d1
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Blockwise-ControlNet-Inpaint.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from modelscope import dataset_snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(path="models/train/Qwen-Image-Blockwise-ControlNet-Inpaint_full/epoch-1.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="inpaint/*.jpg"
+)
+prompt = "a cat with sunglasses"
+controlnet_image = Image.open("./data/example_image_dataset/inpaint/image_1.jpg").convert("RGB").resize((1024, 1024))
+inpaint_mask = Image.open("./data/example_image_dataset/inpaint/mask.jpg").convert("RGB").resize((1024, 1024))
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image, inpaint_mask=inpaint_mask)],
+ height=1024, width=1024,
+ num_inference_steps=40,
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Distill-Full.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Distill-Full.py
new file mode 100644
index 0000000000000000000000000000000000000000..07389c524c2e142679d866257785030eb15795a2
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Distill-Full.py
@@ -0,0 +1,20 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Distill-Full", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+state_dict = load_state_dict("models/train/Qwen-Image-Distill-Full_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+prompt = "a dog"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..c08b4850c4d20925a7d008853d11b213ee3d69b6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image-Edit.py
@@ -0,0 +1,23 @@
+import torch
+from PIL import Image
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
+state_dict = load_state_dict("models/train/Qwen-Image-Edit_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+
+prompt = "将裙子改为粉色"
+image = Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024))
+image = pipe(prompt, edit_image=image, seed=0, num_inference_steps=40, height=1024, width=1024)
+image.save(f"image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image.py
new file mode 100644
index 0000000000000000000000000000000000000000..872321825eb4028fe18d5b00c8ee815bc6356e75
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_full/Qwen-Image.py
@@ -0,0 +1,20 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+from diffsynth import load_state_dict
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+state_dict = load_state_dict("models/train/Qwen-Image_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+prompt = "a dog"
+image = pipe(prompt, seed=0)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a54b5ee07316dcca50e439ec1083d392305ce18
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Canny.py
@@ -0,0 +1,32 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Canny", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Blockwise-ControlNet-Canny_lora/epoch-4.safetensors")
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="canny/image_1.jpg"
+)
+controlnet_image = Image.open("data/example_image_dataset/canny/image_1.jpg").resize((1328, 1328))
+
+prompt = "一只小狗,毛发光洁柔顺,眼神灵动,背景是樱花纷飞的春日庭院,唯美温馨。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..626654559d1970697a2b9a12af3722581956bfbb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Depth.py
@@ -0,0 +1,33 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+from PIL import Image
+import torch
+from modelscope import dataset_snapshot_download
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Depth", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Blockwise-ControlNet-Depth_lora/epoch-4.safetensors")
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="depth/image_1.jpg"
+)
+
+controlnet_image = Image.open("data/example_image_dataset/depth/image_1.jpg").resize((1328, 1328))
+
+prompt = "精致肖像,水下少女,蓝裙飘逸,发丝轻扬,光影透澈,气泡环绕,面容恬静,细节精致,梦幻唯美。"
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image)]
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py
new file mode 100644
index 0000000000000000000000000000000000000000..60bd9f2bbbefd95ab0efe1c7e07aaeb96f4a560d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Blockwise-ControlNet-Inpaint.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from modelscope import dataset_snapshot_download
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig, ControlNetInput
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Blockwise-ControlNet-Inpaint", origin_file_pattern="model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Blockwise-ControlNet-Inpaint_lora/epoch-4.safetensors")
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_image_dataset",
+ local_dir="./data/example_image_dataset",
+ allow_file_pattern="inpaint/*.jpg"
+)
+prompt = "a cat with sunglasses"
+controlnet_image = Image.open("./data/example_image_dataset/inpaint/image_1.jpg").convert("RGB").resize((1024, 1024))
+inpaint_mask = Image.open("./data/example_image_dataset/inpaint/mask.jpg").convert("RGB").resize((1024, 1024))
+image = pipe(
+ prompt, seed=0,
+ blockwise_controlnet_inputs=[ControlNetInput(image=controlnet_image, inpaint_mask=inpaint_mask)],
+ height=1024, width=1024,
+ num_inference_steps=40,
+)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Distill-Full.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Distill-Full.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f644aa805d47bdc04711b28d5ea0d3d3eafc36e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Distill-Full.py
@@ -0,0 +1,18 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Distill-Full", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Distill-Full_lora/epoch-4.safetensors")
+prompt = "a dog"
+image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit.py
new file mode 100644
index 0000000000000000000000000000000000000000..2576be339df5fba385c37567af0902842c306289
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-Edit.py
@@ -0,0 +1,21 @@
+import torch
+from PIL import Image
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=None,
+ processor_config=ModelConfig(model_id="Qwen/Qwen-Image-Edit", origin_file_pattern="processor/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-Edit_lora/epoch-4.safetensors")
+
+prompt = "将裙子改为粉色"
+image = Image.open("data/example_image_dataset/edit/image1.jpg").resize((1024, 1024))
+image = pipe(prompt, edit_image=image, seed=0, num_inference_steps=40, height=1024, width=1024)
+image.save(f"image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-EliGen.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-EliGen.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd7904e9d844232c599b7534bc58398e1820702b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-EliGen.py
@@ -0,0 +1,29 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+from PIL import Image
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-EliGen_lora/epoch-4.safetensors")
+
+
+entity_prompts = ["A beautiful girl", "sign 'Entity Control'", "shorts", "shirt"]
+global_prompt = "A beautiful girl wearing shirt and shorts in the street, holding a sign 'Entity Control'"
+masks = [Image.open(f"data/example_image_dataset/eligen/{i}.png").convert('RGB') for i in range(len(entity_prompts))]
+
+image = pipe(global_prompt,
+ seed=0,
+ height=1024,
+ width=1024,
+ eligen_entity_prompts=entity_prompts,
+ eligen_entity_masks=masks)
+image.save("Qwen-Image_EliGen.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py
new file mode 100644
index 0000000000000000000000000000000000000000..83a93a3f6bf41529c9350052e03f8f95050c8eb5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image-In-Context-Control-Union.py
@@ -0,0 +1,19 @@
+from PIL import Image
+import torch
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image-In-Context-Control-Union_lora/epoch-4.safetensors")
+image = Image.open("data/example_image_dataset/canny/image_1.jpg").resize((1024, 1024))
+prompt = "Context_Control. a dog"
+image = pipe(prompt=prompt, seed=0, context_image=image, height=1024, width=1024)
+image.save("image_context.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image.py
new file mode 100644
index 0000000000000000000000000000000000000000..16be2b4bda15c696c2b7bb4f3fd36176db67da0e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/qwen_image/model_training/validate_lora/Qwen-Image.py
@@ -0,0 +1,18 @@
+from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
+import torch
+
+
+pipe = QwenImagePipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
+ ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
+ ],
+ tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
+)
+pipe.load_lora(pipe.dit, "models/train/Qwen-Image_lora/epoch-4.safetensors")
+prompt = "a dog"
+image = pipe(prompt, seed=0)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/step1x/step1x.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/step1x/step1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..80de280d5327fb4dc37d4eef0227e558d5f06281
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/step1x/step1x.py
@@ -0,0 +1,35 @@
+import torch
+from diffsynth import FluxImagePipeline, ModelManager
+from modelscope import snapshot_download
+from PIL import Image
+import numpy as np
+
+
+snapshot_download("Qwen/Qwen2.5-VL-7B-Instruct", cache_dir="./models")
+snapshot_download("stepfun-ai/Step1X-Edit", cache_dir="./models")
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
+model_manager.load_models([
+ "models/Qwen/Qwen2.5-VL-7B-Instruct",
+ "models/stepfun-ai/Step1X-Edit/step1x-edit-i1258.safetensors",
+ "models/stepfun-ai/Step1X-Edit/vae.safetensors",
+])
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+pipe.enable_vram_management()
+
+image = Image.fromarray(np.zeros((1248, 832, 3), dtype=np.uint8) + 255)
+image = pipe(
+ prompt="draw red flowers in Chinese ink painting style",
+ step1x_reference_image=image,
+ width=832, height=1248, cfg_scale=6,
+ seed=1,
+)
+image.save("image_1.jpg")
+
+image = pipe(
+ prompt="add more flowers in Chinese ink painting style",
+ step1x_reference_image=image,
+ width=832, height=1248, cfg_scale=6,
+ seed=2,
+)
+image.save("image_2.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad2f40484d9fdb94fec87b8be16f02e10aca71bb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/README.md
@@ -0,0 +1,19 @@
+# StepVideo
+
+StepVideo is a state-of-the-art (SoTA) pre-trained text-to-video model with 30 billion parameters, capable of generating videos of up to 204 frames.
+
+* Model: https://modelscope.cn/models/stepfun-ai/stepvideo-t2v/summary
+* GitHub: https://github.com/stepfun-ai/Step-Video-T2V
+* Technical report: https://arxiv.org/abs/2502.10248
+
+## Examples
+
+For the original BF16 version, please see [`./stepvideo_text_to_video.py`](./stepvideo_text_to_video.py). 80 GB of VRAM is required.
+
+We also support auto-offload, which reduces the VRAM requirement to **24 GB**; however, inference takes roughly 2x longer. Please see [`./stepvideo_text_to_video_low_vram.py`](./stepvideo_text_to_video_low_vram.py). A minimal sketch of the key call appears at the end of this page.
+
+https://github.com/user-attachments/assets/5954fdaa-a3cf-45a3-bd35-886e3cc4581b
+
+For the FP8 quantized version, please see [`./stepvideo_text_to_video_quantized.py`](./stepvideo_text_to_video_quantized.py). 40 GB of VRAM is required.
+
+https://github.com/user-attachments/assets/f3697f4e-bc08-47d2-b00a-32d7dfa272ad
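+
+The low-VRAM script relies on the pipeline's VRAM management knob. A minimal sketch of the key call, assuming `pipe` is a `StepVideoPipeline` built as in the scripts above, looks like this:
+
+```python
+# 0 keeps no DiT parameters resident in VRAM (lowest memory, slowest inference);
+# a larger integer or None (unlimited) trades VRAM for speed.
+pipe.enable_vram_management(num_persistent_param_in_dit=0)
+```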
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..302ed08d43fb3ab1a29406b5ff4b81f134bf072e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video.py
@@ -0,0 +1,50 @@
+from modelscope import snapshot_download
+from diffsynth import ModelManager, StepVideoPipeline, save_video
+import torch
+
+
+# Download models
+snapshot_download(model_id="stepfun-ai/stepvideo-t2v", cache_dir="models")
+
+# Load the compiled attention for the LLM text encoder.
+# If you encounter errors here, please select another compiled file that matches your environment, or delete this line.
+torch.ops.load_library("models/stepfun-ai/stepvideo-t2v/lib/liboptimus_ths-torch2.5-cu124.cpython-310-x86_64-linux-gnu.so")
+
+# Load models
+model_manager = ModelManager()
+model_manager.load_models(
+ ["models/stepfun-ai/stepvideo-t2v/hunyuan_clip/clip_text_encoder/pytorch_model.bin"],
+ torch_dtype=torch.float32, device="cpu"
+)
+model_manager.load_models(
+ [
+ "models/stepfun-ai/stepvideo-t2v/step_llm",
+ "models/stepfun-ai/stepvideo-t2v/vae/vae_v2.safetensors",
+ [
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00006-of-00006.safetensors",
+ ]
+ ],
+ torch_dtype=torch.bfloat16, device="cpu"
+)
+pipe = StepVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda")
+
+# Enable VRAM management
+# This model requires 80G VRAM.
+# To reduce the VRAM requirement, set `num_persistent_param_in_dit` to a small number.
+pipe.enable_vram_management(num_persistent_param_in_dit=None)
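+# For example (a sketch; assuming an integer parameter budget is accepted, e.g. ~7B persistent parameters):
+# pipe.enable_vram_management(num_persistent_param_in_dit=7*10**9)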
+
+# Run!
+video = pipe(
+ prompt="一名宇航员在月球上发现一块石碑,上面印有“stepfun”字样,闪闪发光。超高清、HDR 视频、环境光、杜比全景声、画面稳定、流畅动作、逼真的细节、专业级构图、超现实主义、自然、生动、超细节、清晰。",
+ negative_prompt="画面暗、低分辨率、不良手、文本、缺少手指、多余的手指、裁剪、低质量、颗粒状、签名、水印、用户名、模糊。",
+ num_inference_steps=30, cfg_scale=9, num_frames=51, seed=1
+)
+save_video(
+ video, "video.mp4", fps=25, quality=5,
+ ffmpeg_params=["-vf", "atadenoise=0a=0.1:0b=0.1:1a=0.1:1b=0.1"]
+)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_low_vram.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_low_vram.py
new file mode 100644
index 0000000000000000000000000000000000000000..f75ae1606855f062bbc36db82aa681f02d941cda
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_low_vram.py
@@ -0,0 +1,54 @@
+from modelscope import snapshot_download
+from diffsynth import ModelManager, StepVideoPipeline, save_video
+import torch
+
+
+# Download models
+snapshot_download(model_id="stepfun-ai/stepvideo-t2v", cache_dir="models")
+
+# Load the compiled attention for the LLM text encoder.
+# If you encounter errors here, please select another compiled file that matches your environment, or delete this line.
+torch.ops.load_library("models/stepfun-ai/stepvideo-t2v/lib/liboptimus_ths-torch2.5-cu124.cpython-310-x86_64-linux-gnu.so")
+
+# Load models
+model_manager = ModelManager()
+model_manager.load_models(
+ ["models/stepfun-ai/stepvideo-t2v/hunyuan_clip/clip_text_encoder/pytorch_model.bin"],
+ torch_dtype=torch.float32, device="cpu"
+)
+model_manager.load_models(
+ [
+ "models/stepfun-ai/stepvideo-t2v/step_llm",
+ [
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00006-of-00006.safetensors",
+ ]
+ ],
+ torch_dtype=torch.bfloat16, device="cpu" # You can set torch_dtype=torch.bfloat16 to reduce RAM (not VRAM) usage.
+)
+model_manager.load_models(
+ ["models/stepfun-ai/stepvideo-t2v/vae/vae_v2.safetensors"],
+ torch_dtype=torch.bfloat16, device="cpu"
+)
+pipe = StepVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda")
+
+# Enable VRAM management
+# This model requires 24G VRAM.
+# To speed up inference, set `num_persistent_param_in_dit` to a large number or to None (unlimited).
+pipe.enable_vram_management(num_persistent_param_in_dit=0)
+
+# Run!
+video = pipe(
+ prompt="一名宇航员在月球上发现一块石碑,上面印有“stepfun”字样,闪闪发光。超高清、HDR 视频、环境光、杜比全景声、画面稳定、流畅动作、逼真的细节、专业级构图、超现实主义、自然、生动、超细节、清晰。",
+ negative_prompt="画面暗、低分辨率、不良手、文本、缺少手指、多余的手指、裁剪、低质量、颗粒状、签名、水印、用户名、模糊。",
+ num_inference_steps=30, cfg_scale=9, num_frames=51, seed=1,
+ tiled=True, tile_size=(34, 34), tile_stride=(16, 16)
+)
+save_video(
+ video, "video.mp4", fps=25, quality=5,
+ ffmpeg_params=["-vf", "atadenoise=0a=0.1:0b=0.1:1a=0.1:1b=0.1"]
+)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_quantized.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_quantized.py
new file mode 100644
index 0000000000000000000000000000000000000000..7868eb1b5fb7f5a71ea58ee166b39da770d8cf60
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/stepvideo/stepvideo_text_to_video_quantized.py
@@ -0,0 +1,53 @@
+from modelscope import snapshot_download
+from diffsynth import ModelManager, StepVideoPipeline, save_video
+import torch
+
+
+# Download models
+snapshot_download(model_id="stepfun-ai/stepvideo-t2v", cache_dir="models")
+
+# Load the compiled attention for the LLM text encoder.
+# If you encounter errors here, please select another compiled file that matches your environment, or delete this line.
+torch.ops.load_library("models/stepfun-ai/stepvideo-t2v/lib/liboptimus_ths-torch2.5-cu124.cpython-310-x86_64-linux-gnu.so")
+
+# Load models
+model_manager = ModelManager()
+model_manager.load_models(
+ ["models/stepfun-ai/stepvideo-t2v/hunyuan_clip/clip_text_encoder/pytorch_model.bin"],
+ torch_dtype=torch.float32, device="cpu"
+)
+model_manager.load_models(
+ [
+ "models/stepfun-ai/stepvideo-t2v/step_llm",
+ [
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/stepfun-ai/stepvideo-t2v/transformer/diffusion_pytorch_model-00006-of-00006.safetensors",
+ ]
+ ],
+ torch_dtype=torch.float8_e4m3fn, device="cpu"
+)
+model_manager.load_models(
+ ["models/stepfun-ai/stepvideo-t2v/vae/vae_v2.safetensors"],
+ torch_dtype=torch.bfloat16, device="cpu"
+)
+pipe = StepVideoPipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda")
+
+# Enable VRAM management
+# This model requires 40G VRAM.
+# To reduce the required VRAM, set `num_persistent_param_in_dit` to a small number.
+pipe.enable_vram_management(num_persistent_param_in_dit=None)
+
+# Run!
+video = pipe(
+ prompt="一名宇航员在月球上发现一块石碑,上面印有“stepfun”字样,闪闪发光。超高清、HDR 视频、环境光、杜比全景声、画面稳定、流畅动作、逼真的细节、专业级构图、超现实主义、自然、生动、超细节、清晰。",
+ negative_prompt="画面暗、低分辨率、不良手、文本、缺少手指、多余的手指、裁剪、低质量、颗粒状、签名、水印、用户名、模糊。",
+ num_inference_steps=30, cfg_scale=9, num_frames=51, seed=1
+)
+save_video(
+ video, "video.mp4", fps=25, quality=5,
+ ffmpeg_params=["-vf", "atadenoise=0a=0.1:0b=0.1:1a=0.1:1b=0.1"]
+)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa99312d4518a892b4f6aca1cb206ba3edbd5632
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/README.md
@@ -0,0 +1,569 @@
+# DiffSynth Training Framework
+
+We have implemented a training framework for text-to-image diffusion models that makes it easy to train LoRA models. The provided scripts offer the following advantages:
+
+* **Comprehensive Functionality & User-Friendliness**: The training framework supports multi-GPU and multi-machine setups, facilitates the use of DeepSpeed for acceleration, and includes gradient-checkpointing optimizations for models with large memory demands.
+* **Code Conciseness & Researcher Accessibility**: We avoid large blocks of complicated code. General-purpose modules are implemented in `diffsynth/trainers/text_to_image.py`, while the model-specific training scripts contain only the minimal code pertinent to each model architecture, making them researcher-friendly.
+* **Modular Design & Developer Flexibility**: Built on the PyTorch Lightning framework, the training framework is functionally decoupled, allowing developers to introduce additional training techniques by modifying the scripts to suit their needs (a sketch of this structure follows the list).
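+
+The following condensed sketch, modeled on `examples/train/stable_diffusion/train_sd_lora.py` in this repository, shows the pattern: the model-specific script only decides how to load its pipeline, while the shared helpers handle LoRA injection, argument parsing, and the training loop. It illustrates the structure and is not a replacement for the full scripts.
+
+```python
+from diffsynth import ModelManager, SDImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import argparse, torch
+
+
+class LightningModel(LightningModelForT2ILoRA):
+    def __init__(
+        self,
+        torch_dtype=torch.float16, pretrained_weights=[],
+        learning_rate=1e-4, use_gradient_checkpointing=True,
+        lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out",
+        init_lora_weights="gaussian", pretrained_lora_path=None,
+    ):
+        super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+        # Model-specific part: load the checkpoints and build the pipeline.
+        model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+        model_manager.load_models(pretrained_weights)
+        self.pipe = SDImagePipeline.from_model_manager(model_manager)
+        self.pipe.scheduler.set_timesteps(1000)
+        # General part: freeze the base model and inject LoRA layers.
+        self.freeze_parameters()
+        self.add_lora_to_model(
+            self.pipe.denoising_model(),
+            lora_rank=lora_rank, lora_alpha=lora_alpha,
+            lora_target_modules=lora_target_modules,
+            init_lora_weights=init_lora_weights,
+            pretrained_lora_path=pretrained_lora_path,
+        )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pretrained_path", type=str, required=True)
+    parser = add_general_parsers(parser)  # adds the general options listed in "Train a LoRA model" below
+    args = parser.parse_args()
+    model = LightningModel(
+        torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+        pretrained_weights=[args.pretrained_path],
+        learning_rate=args.learning_rate,
+        use_gradient_checkpointing=args.use_gradient_checkpointing,
+        lora_rank=args.lora_rank, lora_alpha=args.lora_alpha,
+    )
+    launch_training_task(model, args)
+```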
+
+Example images generated with fine-tuned LoRA models. The prompt is "一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉" (for Chinese models) or "a dog is jumping, flowers around the dog, the background is mountains and clouds" (for English models).
+
+||FLUX.1-dev|Kolors|Stable Diffusion 3|Hunyuan-DiT|
+|-|-|-|-|-|
+|Without LoRA|||||
+|With LoRA|||||
+
+## Install additional packages
+
+```
+pip install peft lightning pandas
+```
+
+## Prepare your dataset
+
+We provide an example dataset [here](https://modelscope.cn/datasets/buptwq/lora-stable-diffusion-finetune/files). You need to manage the training images as follows:
+
+```
+data/dog/
+└── train
+ ├── 00.jpg
+ ├── 01.jpg
+ ├── 02.jpg
+ ├── 03.jpg
+ ├── 04.jpg
+ └── metadata.csv
+```
+
+`metadata.csv`:
+
+```
+file_name,text
+00.jpg,a dog
+01.jpg,a dog
+02.jpg,a dog
+03.jpg,a dog
+04.jpg,a dog
+```
+
+Note that if the model is a Chinese model (for example, Hunyuan-DiT or Kolors), we recommend using Chinese captions in the dataset. For example:
+
+```
+file_name,text
+00.jpg,一只小狗
+01.jpg,一只小狗
+02.jpg,一只小狗
+03.jpg,一只小狗
+04.jpg,一只小狗
+```
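+
+If you prefer to generate the caption file programmatically, a minimal sketch using pandas (one of the additional packages installed above) could look like this; the file names and captions are simply the toy example from this section:
+
+```python
+import pandas as pd
+
+# Toy example: five images of a dog, each with the same caption.
+metadata = pd.DataFrame({
+    "file_name": [f"{i:02d}.jpg" for i in range(5)],
+    "text": ["a dog"] * 5,
+})
+metadata.to_csv("data/dog/train/metadata.csv", index=False)
+```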
+
+## Train a LoRA model
+
+General options:
+
+```
+ --lora_target_modules LORA_TARGET_MODULES
+ Layers with LoRA modules.
+ --dataset_path DATASET_PATH
+ The path of the Dataset.
+ --output_path OUTPUT_PATH
+ Path to save the model.
+ --steps_per_epoch STEPS_PER_EPOCH
+ Number of steps per epoch.
+ --height HEIGHT Image height.
+ --width WIDTH Image width.
+ --center_crop Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.
+ --random_flip Whether to randomly flip images horizontally
+ --batch_size BATCH_SIZE
+ Batch size (per device) for the training dataloader.
+ --dataloader_num_workers DATALOADER_NUM_WORKERS
+ Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
+ --precision {32,16,16-mixed}
+ Training precision
+ --learning_rate LEARNING_RATE
+ Learning rate.
+ --lora_rank LORA_RANK
+ The dimension of the LoRA update matrices.
+ --lora_alpha LORA_ALPHA
+ The weight of the LoRA update matrices.
+ --use_gradient_checkpointing
+ Whether to use gradient checkpointing.
+ --accumulate_grad_batches ACCUMULATE_GRAD_BATCHES
+ The number of batches in gradient accumulation.
+ --training_strategy {auto,deepspeed_stage_1,deepspeed_stage_2,deepspeed_stage_3}
+ Training strategy
+ --max_epochs MAX_EPOCHS
+ Number of epochs.
+ --modelscope_model_id MODELSCOPE_MODEL_ID
+ Model ID on ModelScope (https://www.modelscope.cn/). The model will be uploaded to ModelScope automatically if you provide a Model ID.
+ --modelscope_access_token MODELSCOPE_ACCESS_TOKEN
+ Access key on ModelScope (https://www.modelscope.cn/). Required if you want to upload the model to ModelScope.
+```
+
+### FLUX
+
+The following files will be used for constructing FLUX. You can download them from [huggingface](https://huggingface.co/black-forest-labs/FLUX.1-dev) or [modelscope](https://www.modelscope.cn/models/ai-modelscope/flux.1-dev). You can use the following code to download these files:
+
+```python
+from diffsynth import download_models
+
+download_models(["FLUX.1-dev"])
+```
+
+```
+models/FLUX/
+└── FLUX.1-dev
+ ├── ae.safetensors
+ ├── flux1-dev.safetensors
+ ├── text_encoder
+ │ └── model.safetensors
+ └── text_encoder_2
+ ├── config.json
+ ├── model-00001-of-00002.safetensors
+ ├── model-00002-of-00002.safetensors
+ └── model.safetensors.index.json
+```
+
+Launch the training task using the following command (39G VRAM required):
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/flux/train_flux_lora.py \
+ --pretrained_text_encoder_path models/FLUX/FLUX.1-dev/text_encoder/model.safetensors \
+ --pretrained_text_encoder_2_path models/FLUX/FLUX.1-dev/text_encoder_2 \
+ --pretrained_dit_path models/FLUX/FLUX.1-dev/flux1-dev.safetensors \
+ --pretrained_vae_path models/FLUX/FLUX.1-dev/ae.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 100 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "bf16" \
+ --learning_rate 1e-4 \
+ --lora_rank 16 \
+ --lora_alpha 16 \
+ --use_gradient_checkpointing \
+ --align_to_opensource_format
+```
+
+By adding the parameter `--quantize "float8_e4m3fn"`, you can save approximately 10G of VRAM.
+
+**`--align_to_opensource_format` means that this script will export the LoRA weights in the open-source format, which can be loaded both in DiffSynth-Studio and in other codebases.**
+
+For more information about the parameters, please use `python examples/train/flux/train_flux_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, FluxImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda",
+ file_path_list=[
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ "models/FLUX/FLUX.1-dev/flux1-dev.safetensors"
+ ])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
+ num_inference_steps=30, embedded_guidance=3.5
+)
+image.save("image_with_lora.jpg")
+```
+
+### Kolors
+
+The following files will be used for constructing Kolors. You can download Kolors from [huggingface](https://huggingface.co/Kwai-Kolors/Kolors) or [modelscope](https://modelscope.cn/models/Kwai-Kolors/Kolors). Due to precision overflow issues, we need to download an additional VAE model (from [huggingface](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) or [modelscope](https://modelscope.cn/models/AI-ModelScope/sdxl-vae-fp16-fix)). You can use the following code to download these files:
+
+```python
+from diffsynth import download_models
+
+download_models(["Kolors", "SDXL-vae-fp16-fix"])
+```
+
+```
+models
+├── kolors
+│ └── Kolors
+│ ├── text_encoder
+│ │ ├── config.json
+│ │ ├── pytorch_model-00001-of-00007.bin
+│ │ ├── pytorch_model-00002-of-00007.bin
+│ │ ├── pytorch_model-00003-of-00007.bin
+│ │ ├── pytorch_model-00004-of-00007.bin
+│ │ ├── pytorch_model-00005-of-00007.bin
+│ │ ├── pytorch_model-00006-of-00007.bin
+│ │ ├── pytorch_model-00007-of-00007.bin
+│ │ └── pytorch_model.bin.index.json
+│ ├── unet
+│ │ └── diffusion_pytorch_model.safetensors
+│ └── vae
+│ └── diffusion_pytorch_model.safetensors
+└── sdxl-vae-fp16-fix
+ └── diffusion_pytorch_model.safetensors
+```
+
+Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/kolors/train_kolors_lora.py \
+ --pretrained_unet_path models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors \
+ --pretrained_text_encoder_path models/kolors/Kolors/text_encoder \
+ --pretrained_fp16_vae_path models/sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "16-mixed" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/kolors/train_kolors_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, SDXLImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/kolors/Kolors/text_encoder",
+ "models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors",
+ "models/sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors"
+ ])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉",
+ negative_prompt="",
+ cfg_scale=7.5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_with_lora.jpg")
+```
+
+### Stable Diffusion 3.5 Series
+
+
+You need to download the text encoders and DiT model files. Please use the following code to download these files:
+
+```python
+from diffsynth import download_models
+
+download_models(["StableDiffusion3.5-large"])
+```
+
+```
+models/stable_diffusion_3
+├── Put Stable Diffusion 3 checkpoints here.txt
+├── sd3.5_large.safetensors
+└── text_encoders
+ ├── clip_g.safetensors
+ ├── clip_l.safetensors
+ └── t5xxl_fp16.safetensors
+```
+
+Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion_3/train_sd3_lora.py \
+ --pretrained_path models/stable_diffusion_3/text_encoders/clip_g.safetensors,models/stable_diffusion_3/text_encoders/clip_l.safetensors,models/stable_diffusion_3/text_encoders/t5xxl_fp16.safetensors,models/stable_diffusion_3/sd3.5_large.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "16" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/stable_diffusion_3/train_sd3_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, SD3ImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/stable_diffusion_3/text_encoders/clip_g.safetensors",
+ "models/stable_diffusion_3/text_encoders/clip_l.safetensors",
+ "models/stable_diffusion_3/text_encoders/t5xxl_fp16.safetensors",
+ "models/stable_diffusion_3/sd3.5_large.safetensors"
+ ])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = SD3ImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
+ num_inference_steps=30, cfg_scale=7
+)
+image.save("image_with_lora.jpg")
+```
+
+### Stable Diffusion 3
+
+Only one file is required in the training script. You can use [`sd3_medium_incl_clips.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips.safetensors) (without T5 encoder) or [`sd3_medium_incl_clips_t5xxlfp16.safetensors`](https://huggingface.co/stabilityai/stable-diffusion-3-medium/resolve/main/sd3_medium_incl_clips_t5xxlfp16.safetensors) (with T5 encoder). Please use the following code to download these files:
+
+```python
+from diffsynth import download_models
+
+download_models(["StableDiffusion3", "StableDiffusion3_without_T5"])
+```
+
+```
+models/stable_diffusion_3/
+├── Put Stable Diffusion 3 checkpoints here.txt
+├── sd3_medium_incl_clips.safetensors
+└── sd3_medium_incl_clips_t5xxlfp16.safetensors
+```
+
+Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion_3/train_sd3_lora.py \
+ --pretrained_path models/stable_diffusion_3/sd3_medium_incl_clips.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "16" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/stable_diffusion_3/train_sd3_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, SD3ImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=["models/stable_diffusion_3/sd3_medium_incl_clips.safetensors"])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = SD3ImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
+ negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
+ cfg_scale=7.5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_with_lora.jpg")
+```
+
+### Hunyuan-DiT
+
+Four files will be used for constructing Hunyuan DiT. You can download them from [huggingface](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT) or [modelscope](https://www.modelscope.cn/models/modelscope/HunyuanDiT/summary). You can use the following code to download these files:
+
+```python
+from diffsynth import download_models
+
+download_models(["HunyuanDiT"])
+```
+
+```
+models/HunyuanDiT/
+├── Put Hunyuan DiT checkpoints here.txt
+└── t2i
+ ├── clip_text_encoder
+ │ └── pytorch_model.bin
+ ├── model
+ │ └── pytorch_model_ema.pt
+ ├── mt5
+ │ └── pytorch_model.bin
+ └── sdxl-vae-fp16-fix
+ └── diffusion_pytorch_model.bin
+```
+
+Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/hunyuan_dit/train_hunyuan_dit_lora.py \
+ --pretrained_path models/HunyuanDiT/t2i \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "16-mixed" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/hunyuan_dit/train_hunyuan_dit_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, HunyuanDiTImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=[
+ "models/HunyuanDiT/t2i/clip_text_encoder/pytorch_model.bin",
+ "models/HunyuanDiT/t2i/model/pytorch_model_ema.pt",
+ "models/HunyuanDiT/t2i/mt5/pytorch_model.bin",
+ "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"
+ ])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="一只小狗蹦蹦跳跳,周围是姹紫嫣红的鲜花,远处是山脉",
+ negative_prompt="",
+ cfg_scale=7.5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_with_lora.jpg")
+```
+
+### Stable Diffusion
+
+Only one file is required by the training script. We support mainstream checkpoints on [CivitAI](https://civitai.com/). By default, we use the base Stable Diffusion v1.5, which you can download from [huggingface](https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors) or [modelscope](https://www.modelscope.cn/models/AI-ModelScope/stable-diffusion-v1-5/resolve/master/v1-5-pruned-emaonly.safetensors). You can also use the following code to download this file:
+
+```python
+from diffsynth import download_models
+
+download_models(["StableDiffusion_v15"])
+```
+
+```
+models/stable_diffusion
+├── Put Stable Diffusion checkpoints here.txt
+└── v1-5-pruned-emaonly.safetensors
+```
+
+Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion/train_sd_lora.py \
+ --pretrained_path models/stable_diffusion/v1-5-pruned-emaonly.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 512 \
+ --width 512 \
+ --center_crop \
+ --precision "16-mixed" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/stable_diffusion/train_sd_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, SDImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=["models/stable_diffusion/v1-5-pruned-emaonly.safetensors"])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = SDImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
+ negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
+ cfg_scale=7.5,
+ num_inference_steps=100, width=512, height=512,
+)
+image.save("image_with_lora.jpg")
+```
+
+### Stable Diffusion XL
+
+Only one file is required by the training script. We support mainstream checkpoints on [CivitAI](https://civitai.com/). By default, we use the base Stable Diffusion XL, which you can download from [huggingface](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors) or [modelscope](https://www.modelscope.cn/models/AI-ModelScope/stable-diffusion-xl-base-1.0/resolve/master/sd_xl_base_1.0.safetensors). You can also use the following code to download this file:
+
+```python
+from diffsynth import download_models
+
+download_models(["StableDiffusionXL_v1"])
+```
+
+```
+models/stable_diffusion_xl
+├── Put Stable Diffusion XL checkpoints here.txt
+└── sd_xl_base_1.0.safetensors
+```
+
+We observed that Stable Diffusion XL is not float16-safe, so we recommend training in float32. Launch the training task using the following command:
+
+```
+CUDA_VISIBLE_DEVICES="0" python examples/train/stable_diffusion_xl/train_sdxl_lora.py \
+ --pretrained_path models/stable_diffusion_xl/sd_xl_base_1.0.safetensors \
+ --dataset_path data/dog \
+ --output_path ./models \
+ --max_epochs 1 \
+ --steps_per_epoch 500 \
+ --height 1024 \
+ --width 1024 \
+ --center_crop \
+ --precision "32" \
+ --learning_rate 1e-4 \
+ --lora_rank 4 \
+ --lora_alpha 4 \
+ --use_gradient_checkpointing
+```
+
+For more information about the parameters, please use `python examples/train/stable_diffusion_xl/train_sdxl_lora.py -h` to see the details.
+
+After training, use `model_manager.load_lora` to load the LoRA for inference.
+
+```python
+from diffsynth import ModelManager, SDXLImagePipeline
+import torch
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda",
+ file_path_list=["models/stable_diffusion_xl/sd_xl_base_1.0.safetensors"])
+model_manager.load_lora("models/lightning_logs/version_0/checkpoints/epoch=0-step=500.ckpt", lora_alpha=1.0)
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+
+torch.manual_seed(0)
+image = pipe(
+ prompt="a dog is jumping, flowers around the dog, the background is mountains and clouds",
+ negative_prompt="bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi, extra tails",
+ cfg_scale=7.5,
+ num_inference_steps=100, width=1024, height=1024,
+)
+image.save("image_with_lora.jpg")
+```
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/flux/train_flux_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/flux/train_flux_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb5539a203441f9e03223dc50e3bc09badfc78c6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/flux/train_flux_lora.py
@@ -0,0 +1,124 @@
+from diffsynth import ModelManager, FluxImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+from diffsynth.models.lora import FluxLoRAConverter
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[], preset_lora_path=None,
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="kaiming", pretrained_lora_path=None,
+ state_dict_converter=None, quantize = None
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing, state_dict_converter=state_dict_converter)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ if quantize is None:
+ model_manager.load_models(pretrained_weights)
+ else:
+ model_manager.load_models(pretrained_weights[1:])
+ model_manager.load_model(pretrained_weights[0], torch_dtype=quantize)
+ if preset_lora_path is not None:
+ preset_lora_path = preset_lora_path.split(",")
+ for path in preset_lora_path:
+ model_manager.load_lora(path)
+
+ self.pipe = FluxImagePipeline.from_model_manager(model_manager)
+
+ if quantize is not None:
+ self.pipe.dit.quantize()
+
+ self.pipe.scheduler.set_timesteps(1000, training=True)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ state_dict_converter=FluxLoRAConverter.align_to_diffsynth_format
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_text_encoder_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained text encoder model. For example, `models/FLUX/FLUX.1-dev/text_encoder/model.safetensors`.",
+ )
+ parser.add_argument(
+ "--pretrained_text_encoder_2_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained t5 text encoder model. For example, `models/FLUX/FLUX.1-dev/text_encoder_2`.",
+ )
+ parser.add_argument(
+ "--pretrained_dit_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained dit model. For example, `models/FLUX/FLUX.1-dev/flux1-dev.safetensors`.",
+ )
+ parser.add_argument(
+ "--pretrained_vae_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained vae model. For example, `models/FLUX/FLUX.1-dev/ae.safetensors`.",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp",
+ help="Layers with LoRA modules.",
+ )
+ parser.add_argument(
+ "--align_to_opensource_format",
+ default=False,
+ action="store_true",
+ help="Whether to export lora files aligned with other opensource format.",
+ )
+ parser.add_argument(
+ "--quantize",
+ type=str,
+ default=None,
+ choices=["float8_e4m3fn"],
+ help="Whether to use quantization when training the model, and in which format.",
+ )
+ parser.add_argument(
+ "--preset_lora_path",
+ type=str,
+ default=None,
+ help="Preset LoRA path.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype={"32": torch.float32, "bf16": torch.bfloat16}.get(args.precision, torch.float16),
+ pretrained_weights=[args.pretrained_dit_path, args.pretrained_text_encoder_path, args.pretrained_text_encoder_2_path, args.pretrained_vae_path],
+ preset_lora_path=args.preset_lora_path,
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ lora_target_modules=args.lora_target_modules,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ state_dict_converter=FluxLoRAConverter.align_to_opensource_format if args.align_to_opensource_format else None,
+ quantize={"float8_e4m3fn": torch.float8_e4m3fn}.get(args.quantize, None),
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..7764ab5a65d8552c16c805e70b5ddfd2190b1ae3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/hunyuan_dit/train_hunyuan_dit_lora.py
@@ -0,0 +1,70 @@
+from diffsynth import ModelManager, HunyuanDiTImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[],
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian", pretrained_lora_path=None,
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ model_manager.load_models(pretrained_weights)
+ self.pipe = HunyuanDiTImagePipeline.from_model_manager(model_manager)
+ self.pipe.scheduler.set_timesteps(1000)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model. For example, `./HunyuanDiT/t2i`.",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="to_q,to_k,to_v,to_out",
+ help="Layers with LoRA modules.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+ pretrained_weights=[
+ os.path.join(args.pretrained_path, "clip_text_encoder/pytorch_model.bin"),
+ os.path.join(args.pretrained_path, "mt5/pytorch_model.bin"),
+ os.path.join(args.pretrained_path, "model/pytorch_model_ema.pt"),
+ os.path.join(args.pretrained_path, "sdxl-vae-fp16-fix/diffusion_pytorch_model.bin"),
+ ],
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ lora_target_modules=args.lora_target_modules
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/kolors/train_kolors_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/kolors/train_kolors_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..48a98929c97098c727ba9d70a68ac40094bcac62
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/kolors/train_kolors_lora.py
@@ -0,0 +1,86 @@
+from diffsynth import ModelManager, SDXLImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[],
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian", pretrained_lora_path=None,
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ model_manager.load_models(pretrained_weights)
+ self.pipe = SDXLImagePipeline.from_model_manager(model_manager)
+ self.pipe.scheduler.set_timesteps(1100)
+
+ # Convert the vae encoder to torch.float16
+ self.pipe.vae_encoder.to(torch_dtype)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_unet_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model (UNet). For example, `models/kolors/Kolors/unet/diffusion_pytorch_model.safetensors`.",
+ )
+ parser.add_argument(
+ "--pretrained_text_encoder_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model (Text Encoder). For example, `models/kolors/Kolors/text_encoder`.",
+ )
+ parser.add_argument(
+ "--pretrained_fp16_vae_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model (VAE). For example, `models/kolors/Kolors/sdxl-vae-fp16-fix/diffusion_pytorch_model.safetensors`.",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="to_q,to_k,to_v,to_out",
+ help="Layers with LoRA modules.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+ pretrained_weights=[
+ args.pretrained_unet_path,
+ args.pretrained_text_encoder_path,
+ args.pretrained_fp16_vae_path,
+ ],
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ lora_target_modules=args.lora_target_modules
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion/train_sd_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion/train_sd_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc2452017ecf638062d93f48702730efb11cac42
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion/train_sd_lora.py
@@ -0,0 +1,65 @@
+from diffsynth import ModelManager, SDImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[],
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian", pretrained_lora_path=None,
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ model_manager.load_models(pretrained_weights)
+ self.pipe = SDImagePipeline.from_model_manager(model_manager)
+ self.pipe.scheduler.set_timesteps(1000)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model. For example, `models/stable_diffusion/v1-5-pruned-emaonly.safetensors`.",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="to_q,to_k,to_v,to_out",
+ help="Layers with LoRA modules.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+ pretrained_weights=[args.pretrained_path],
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ lora_target_modules=args.lora_target_modules
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_3/train_sd3_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_3/train_sd3_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..dde1c9665f2981ae8c592b4155472b8e5f171069
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_3/train_sd3_lora.py
@@ -0,0 +1,83 @@
+from diffsynth import ModelManager, SD3ImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[], preset_lora_path=None,
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian", pretrained_lora_path=None,
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ model_manager.load_models(pretrained_weights)
+ self.pipe = SD3ImagePipeline.from_model_manager(model_manager)
+ self.pipe.scheduler.set_timesteps(1000, training=True)
+
+ if preset_lora_path is not None:
+ preset_lora_path = preset_lora_path.split(",")
+ for path in preset_lora_path:
+ model_manager.load_lora(path)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained models, separated by comma. For example, SD3: `models/stable_diffusion_3/sd3_medium_incl_clips_t5xxlfp16.safetensors`, SD3.5-large: `models/stable_diffusion_3/text_encoders/clip_g.safetensors,models/stable_diffusion_3/text_encoders/clip_l.safetensors,models/stable_diffusion_3/text_encoders/t5xxl_fp16.safetensors,models/stable_diffusion_3/sd3.5_large.safetensors`",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="a_to_qkv,b_to_qkv,norm_1_a.linear,norm_1_b.linear,a_to_out,b_to_out,ff_a.0,ff_a.2,ff_b.0,ff_b.2",
+ help="Layers with LoRA modules.",
+ )
+ parser.add_argument(
+ "--preset_lora_path",
+ type=str,
+ default=None,
+ help="Preset LoRA path.",
+ )
+ parser.add_argument(
+ "--num_timesteps",
+ type=int,
+ default=1000,
+ help="Number of total timesteps. For turbo models, please set this parameter to the number of expected number of inference steps.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+ pretrained_weights=args.pretrained_path.split(","),
+ preset_lora_path=args.preset_lora_path,
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ lora_target_modules=args.lora_target_modules
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_xl/train_sdxl_lora.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_xl/train_sdxl_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..de0241d25a51d9d92c5f1ec6c543e930e9d97ec8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/train/stable_diffusion_xl/train_sdxl_lora.py
@@ -0,0 +1,65 @@
+from diffsynth import ModelManager, SDXLImagePipeline
+from diffsynth.trainers.text_to_image import LightningModelForT2ILoRA, add_general_parsers, launch_training_task
+import torch, os, argparse
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+class LightningModel(LightningModelForT2ILoRA):
+ def __init__(
+ self,
+ torch_dtype=torch.float16, pretrained_weights=[],
+ learning_rate=1e-4, use_gradient_checkpointing=True,
+ lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="gaussian", pretrained_lora_path=None,
+ ):
+ super().__init__(learning_rate=learning_rate, use_gradient_checkpointing=use_gradient_checkpointing)
+ # Load models
+ model_manager = ModelManager(torch_dtype=torch_dtype, device=self.device)
+ model_manager.load_models(pretrained_weights)
+ self.pipe = SDXLImagePipeline.from_model_manager(model_manager)
+ self.pipe.scheduler.set_timesteps(1000)
+
+ self.freeze_parameters()
+ self.add_lora_to_model(
+ self.pipe.denoising_model(),
+ lora_rank=lora_rank,
+ lora_alpha=lora_alpha,
+ lora_target_modules=lora_target_modules,
+ init_lora_weights=init_lora_weights,
+ pretrained_lora_path=pretrained_lora_path,
+ )
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
+ parser.add_argument(
+ "--pretrained_path",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to pretrained model. For example, `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`.",
+ )
+ parser.add_argument(
+ "--lora_target_modules",
+ type=str,
+ default="to_q,to_k,to_v,to_out",
+ help="Layers with LoRA modules.",
+ )
+ parser = add_general_parsers(parser)
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ model = LightningModel(
+ torch_dtype=torch.float32 if args.precision == "32" else torch.float16,
+ pretrained_weights=[args.pretrained_path],
+ learning_rate=args.learning_rate,
+ use_gradient_checkpointing=args.use_gradient_checkpointing,
+ lora_rank=args.lora_rank,
+ lora_alpha=args.lora_alpha,
+ init_lora_weights=args.init_lora_weights,
+ pretrained_lora_path=args.pretrained_lora_path,
+ lora_target_modules=args.lora_target_modules
+ )
+ launch_training_task(model, args)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f6c0715b87d022c3c54ac3543e2c0dab59ef890a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/README.md
@@ -0,0 +1,7 @@
+# Text to Video
+
+### Example: Text-to-Video using AnimateDiff
+
+Generate a video using a Stable Diffusion model and an AnimateDiff model. We can go beyond the usual limit on the number of frames! See [sd_text_to_video.py](./sd_text_to_video.py).
+
+https://github.com/Artiprocher/DiffSynth-Studio/assets/35051019/8f556355-4079-4445-9b48-e9da77699437
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sd_text_to_video.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sd_text_to_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac0fe892f7cfc543532c296d098406d1436bb678
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sd_text_to_video.py
@@ -0,0 +1,39 @@
+from diffsynth import ModelManager, SDImagePipeline, SDVideoPipeline, save_video, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion/dreamshaper_8.safetensors`: [link](https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16)
+# `models/AnimateDiff/mm_sd_v15_v2.ckpt`: [link](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt)
+download_models(["DreamShaper_8", "AnimateDiff_v2"])
+
+# Load models
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models([
+ "models/stable_diffusion/dreamshaper_8.safetensors",
+ "models/AnimateDiff/mm_sd_v15_v2.ckpt",
+])
+
+# Text -> Image
+pipe_image = SDImagePipeline.from_model_manager(model_manager)
+torch.manual_seed(0)
+image = pipe_image(
+ prompt = "lightning storm, sea",
+ negative_prompt = "",
+ cfg_scale=7.5,
+ num_inference_steps=30, height=512, width=768,
+)
+
+# Text + Image -> Video (6GB VRAM is enough!)
+pipe = SDVideoPipeline.from_model_manager(model_manager)
+output_video = pipe(
+ prompt = "lightning storm, sea",
+ negative_prompt = "",
+ cfg_scale=7.5,
+ num_frames=64,
+ num_inference_steps=10, height=512, width=768,
+ animatediff_batch_size=16, animatediff_stride=1, input_frames=[image]*64, denoising_strength=0.9,
+)
+
+# Save images and video
+save_video(output_video, "output_video.mp4", fps=30)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sdxl_text_to_video.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sdxl_text_to_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..b96498a7554335f8a7bb1c3ec8e2edea1138c287
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/sdxl_text_to_video.py
@@ -0,0 +1,28 @@
+from diffsynth import ModelManager, SDXLVideoPipeline, save_video, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors)
+# `models/AnimateDiff/mm_sdxl_v10_beta.ckpt`: [link](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sdxl_v10_beta.ckpt)
+download_models(["StableDiffusionXL_v1", "AnimateDiff_xl_beta"])
+
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models([
+ "models/stable_diffusion_xl/sd_xl_base_1.0.safetensors",
+ "models/AnimateDiff/mm_sdxl_v10_beta.ckpt"
+])
+pipe = SDXLVideoPipeline.from_model_manager(model_manager)
+
+prompt = "A panda standing on a surfboard in the ocean in sunset, 4k, high resolution. Realistic, Cinematic, high resolution"
+negative_prompt = ""
+
+torch.manual_seed(0)
+video = pipe(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ cfg_scale=8.5,
+ height=1024, width=1024, num_frames=16,
+ num_inference_steps=100,
+)
+save_video(video, "output_video.mp4", fps=16)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/svd_text_to_video.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/svd_text_to_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee91c92069ffb1bbc43c19d69e0bdd0d2ced612c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/video_synthesis/svd_text_to_video.py
@@ -0,0 +1,36 @@
+from diffsynth import save_video, SDXLImagePipeline, ModelManager, SVDVideoPipeline, download_models
+import torch
+
+
+# Download models (automatically)
+# `models/stable_diffusion_xl/sd_xl_base_1.0.safetensors`: [link](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors)
+# `models/stable_video_diffusion/svd_xt.safetensors`: [link](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/resolve/main/svd_xt.safetensors)
+download_models(["StableDiffusionXL_v1", "stable-video-diffusion-img2vid-xt"])
+
+prompt = "cloud, wind"
+torch.manual_seed(0)
+
+# 1. Text-to-image using SD-XL
+model_manager = ModelManager(torch_dtype=torch.float16, device="cuda")
+model_manager.load_models(["models/stable_diffusion_xl/sd_xl_base_1.0.safetensors"])
+pipe = SDXLImagePipeline.from_model_manager(model_manager)
+image = pipe(
+ prompt=prompt,
+ negative_prompt="",
+ cfg_scale=6,
+ height=1024, width=1024, num_inference_steps=50,
+)
+model_manager.to("cpu")
+
+# 2. Image-to-video using SVD
+model_manager = ModelManager()
+model_manager.load_models(["models/stable_video_diffusion/svd_xt.safetensors"])
+pipe = SVDVideoPipeline.from_model_manager(model_manager)
+video = pipe(
+ input_image=image,
+ num_frames=25, fps=15, height=1024, width=1024,
+ motion_bucket_id=127,
+ num_inference_steps=50
+)
+save_video(video, "output_video.mp4", fps=15)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..98f96767eb4bb7cc90a7a1c86ea471734ef5d9cf
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/README.md
@@ -0,0 +1,3 @@
+# VRAM Management
+
+Experimental feature. Still under development.
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/flux_text_to_image.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/flux_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea241060bb7288d329666a7981584e263d3fe030
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/vram_management/flux_text_to_image.py
@@ -0,0 +1,25 @@
+import torch
+from diffsynth import ModelManager, FluxImagePipeline
+
+
+model_manager = ModelManager(
+ file_path_list=[
+ "models/FLUX/FLUX.1-dev/text_encoder/model.safetensors",
+ "models/FLUX/FLUX.1-dev/text_encoder_2",
+ "models/FLUX/FLUX.1-dev/flux1-dev.safetensors",
+ "models/FLUX/FLUX.1-dev/ae.safetensors",
+ ],
+ torch_dtype=torch.float8_e4m3fn,
+ device="cpu"
+)
+pipe = FluxImagePipeline.from_model_manager(model_manager, torch_dtype=torch.bfloat16, device="cuda")
+
+# Enable VRAM management
+# `num_persistent_param_in_dit` indicates the number of parameters that reside persistently in VRAM within the DiT model.
+# When `num_persistent_param_in_dit=None`, all parameters reside persistently in VRAM.
+# When `num_persistent_param_in_dit=7*10**9`, 7 billion parameters reside persistently in VRAM.
+# When `num_persistent_param_in_dit=0`, no parameters reside persistently in VRAM; the layers are loaded one by one during inference.
+pipe.enable_vram_management(num_persistent_param_in_dit=None)
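+# For example, to keep only part of the DiT resident in VRAM (illustrative value, matching the comment above):
+# pipe.enable_vram_management(num_persistent_param_in_dit=7*10**9)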
+
+image = pipe(prompt="a beautiful orange cat", seed=0)
+image.save("image.jpg")
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..add9fa5ae052991bbf9f4ecdf3dc92060eced703
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README.md
@@ -0,0 +1,463 @@
+# Wan
+
+[切换到中文](./README_zh.md)
+
+Wan is a collection of video synthesis models open-sourced by Alibaba.
+
+**DiffSynth-Studio has adopted a new inference and training framework. To use the previous version, please click [here](https://github.com/modelscope/DiffSynth-Studio/tree/3edf3583b1f08944cee837b94d9f84d669c2729c).**
+
+## Installation
+
+Before using this model, please install DiffSynth-Studio from **source code**.
+
+```shell
+git clone https://github.com/modelscope/DiffSynth-Studio.git
+cd DiffSynth-Studio
+pip install -e .
+```
+
+## Quick Start
+
+You can quickly load the [Wan-AI/Wan2.1-T2V-1.3B](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B) model and run inference by executing the code below.
+
+```python
+import torch
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+```
+
+## Overview
+
+| Model ID | Extra Parameters | Inference | Full Training | Full Training Validation | LoRA Training | LoRA Training Validation |
+|-|-|-|-|-|-|-|
+|[Wan-AI/Wan2.2-S2V-14B](https://www.modelscope.cn/models/Wan-AI/Wan2.2-S2V-14B)|`input_image`, `input_audio`, `audio_sample_rate`, `s2v_pose_video`|[code](./model_inference/Wan2.2-S2V-14B.py)|-|-|-|-|
+|[Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B)|`input_image`|[code](./model_inference/Wan2.2-I2V-A14B.py)|[code](./model_training/full/Wan2.2-I2V-A14B.sh)|[code](./model_training/validate_full/Wan2.2-I2V-A14B.py)|[code](./model_training/lora/Wan2.2-I2V-A14B.sh)|[code](./model_training/validate_lora/Wan2.2-I2V-A14B.py)|
+|[Wan-AI/Wan2.2-T2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-T2V-A14B)||[code](./model_inference/Wan2.2-T2V-A14B.py)|[code](./model_training/full/Wan2.2-T2V-A14B.sh)|[code](./model_training/validate_full/Wan2.2-T2V-A14B.py)|[code](./model_training/lora/Wan2.2-T2V-A14B.sh)|[code](./model_training/validate_lora/Wan2.2-T2V-A14B.py)|
+|[Wan-AI/Wan2.2-TI2V-5B](https://modelscope.cn/models/Wan-AI/Wan2.2-TI2V-5B)|`input_image`|[code](./model_inference/Wan2.2-TI2V-5B.py)|[code](./model_training/full/Wan2.2-TI2V-5B.sh)|[code](./model_training/validate_full/Wan2.2-TI2V-5B.py)|[code](./model_training/lora/Wan2.2-TI2V-5B.sh)|[code](./model_training/validate_lora/Wan2.2-TI2V-5B.py)|
+|[Wan-AI/Wan2.1-T2V-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B)||[code](./model_inference/Wan2.1-T2V-1.3B.py)|[code](./model_training/full/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-1.3B.py)|[code](./model_training/lora/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-1.3B.py)|
+|[Wan-AI/Wan2.1-T2V-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B)||[code](./model_inference/Wan2.1-T2V-14B.py)|[code](./model_training/full/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-14B.py)|[code](./model_training/lora/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-14B.py)|
+|[Wan-AI/Wan2.1-I2V-14B-480P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-480P.py)|[code](./model_training/full/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-480P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-480P.py)|
+|[Wan-AI/Wan2.1-I2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P)|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-720P.py)|[code](./model_training/full/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-720P.py)|
+|[Wan-AI/Wan2.1-FLF2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/full/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py)|
+|[PAI/Wan2.1-Fun-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py)|
+|[PAI/Wan2.1-Fun-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)|`control_video`|[code](./model_inference/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py)|
+|[PAI/Wan2.1-Fun-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-14B-InP.py)|[code](./model_training/full/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-InP.py)|
+|[PAI/Wan2.1-Fun-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-Control)|`control_video`|[code](./model_inference/Wan2.1-Fun-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control)|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control)|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-InP.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera)|`control_camera_video`, `input_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)|`control_camera_video`, `input_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|
+|[iic/VACE-Wan2.1-1.3B-Preview](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-1.3B-Preview.py)|[code](./model_training/full/Wan2.1-VACE-1.3B-Preview.sh)|[code](./model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py)|[code](./model_training/lora/Wan2.1-VACE-1.3B-Preview.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py)|
+|[Wan-AI/Wan2.1-VACE-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-1.3B)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-1.3B.py)|[code](./model_training/full/Wan2.1-VACE-1.3B.sh)|[code](./model_training/validate_full/Wan2.1-VACE-1.3B.py)|[code](./model_training/lora/Wan2.1-VACE-1.3B.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-1.3B.py)|
+|[Wan-AI/Wan2.1-VACE-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-14B)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-14B.py)|[code](./model_training/full/Wan2.1-VACE-14B.sh)|[code](./model_training/validate_full/Wan2.1-VACE-14B.py)|[code](./model_training/lora/Wan2.1-VACE-14B.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-14B.py)|
+|[DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1)|`motion_bucket_id`|[code](./model_inference/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py)|
+
+
+## Model Inference
+
+The following sections will help you understand our functionalities and write inference code.
+
+
+
+### Loading the Model
+
+The model is loaded using `from_pretrained`:
+
+```python
+import torch
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth"),
+ ],
+)
+```
+
+Here, `torch_dtype` and `device` specify the computation precision and device respectively. The `model_configs` can be used to configure model paths in various ways:
+
+* Downloading the model from [ModelScope](https://modelscope.cn/) and loading it. In this case, both `model_id` and `origin_file_pattern` need to be specified, for example:
+
+```python
+ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+```
+
+* Loading the model from a local file path. In this case, the `path` parameter needs to be specified, for example:
+
+```python
+ModelConfig(path="models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors")
+```
+
+For models that are loaded from multiple files, simply use a list, for example:
+
+```python
+ModelConfig(path=[
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
+])
+```
+
+The `ModelConfig` function provides additional parameters to control the behavior during model loading:
+
+* `local_model_path`: Path where downloaded models are saved. Default value is `"./models"`.
+* `skip_download`: Whether to skip downloading models. Default value is `False`. When your network cannot access [ModelScope](https://modelscope.cn/), manually download the necessary files and set this to `True`.
+
+The `from_pretrained` function provides additional parameters to control the behavior during model loading:
+
+* `tokenizer_config`: Path to the tokenizer of the Wan model. Default value is `ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*")`.
+* `redirect_common_files`: Whether to redirect duplicate model files. Default value is `True`. Since the Wan series models include multiple base models, some modules like text encoder are shared across these models. To avoid redundant downloads, we redirect the model paths.
+* `use_usp`: Whether to enable Unified Sequence Parallel. Default value is `False`. Used for multi-GPU parallel inference.
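+
+As an illustration, the options above can be combined as follows. This is a minimal sketch that assumes the three model files have already been downloaded manually into the default `./models` directory:
+
+```python
+import torch
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+# Reuse files that were downloaded manually instead of fetching them from ModelScope.
+local_configs = [
+    ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors",
+                local_model_path="./models", skip_download=True),
+    ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth",
+                local_model_path="./models", skip_download=True),
+    ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth",
+                local_model_path="./models", skip_download=True),
+]
+
+pipe = WanVideoPipeline.from_pretrained(
+    torch_dtype=torch.bfloat16,
+    device="cuda",
+    model_configs=local_configs,
+    redirect_common_files=False,  # keep every file path exactly as configured above
+)
+```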
+
+
+
+
+
+### VRAM Management
+
+DiffSynth-Studio provides fine-grained VRAM management for the Wan model, allowing it to run on devices with limited VRAM. You can enable offloading via the following code, which moves parts of the model to system memory:
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+FP8 quantization is also supported:
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_dtype=torch.float8_e4m3fn),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+Both FP8 quantization and offloading can be enabled simultaneously:
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+FP8 quantization significantly reduces VRAM usage but does not accelerate computations. Some models may experience issues such as blurry, torn, or distorted outputs due to insufficient precision when using FP8 quantization. Use FP8 quantization with caution.
+
+After enabling VRAM management, the framework will automatically decide the VRAM strategy based on available GPU memory. The `enable_vram_management` function has the following parameters to manually control the VRAM strategy:
+
+* `vram_limit`: VRAM usage limit in GB. By default, all free VRAM on the device is used. Note that this is not a hard limit: if the configured limit is too small to run inference but enough VRAM is actually available, inference will still proceed while minimizing VRAM usage. Setting it to 0 yields the theoretical minimum VRAM usage.
+* `vram_buffer`: VRAM buffer size in GB. Default is 0.5GB. A buffer is needed because larger neural network layers may use more VRAM than expected during loading. The optimal value is the VRAM used by the largest layer in the model.
+* `num_persistent_param_in_dit`: Number of parameters in the DiT model that stay in VRAM. Default is no limit. We plan to remove this parameter in the future. Do not rely on it.
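+
+As a rough sketch of how these options might be set manually (the numbers below are illustrative assumptions, not tuned recommendations):
+
+```python
+# Keep VRAM usage around 8 GB and reserve a 0.5 GB buffer for onloading large layers.
+pipe.enable_vram_management(vram_limit=8, vram_buffer=0.5)
+
+# Alternatively, target the theoretical minimum VRAM footprint (parameters are
+# loaded layer by layer during inference, which is slower).
+# pipe.enable_vram_management(vram_limit=0)
+```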
+
+
+
+
+
+### Inference Acceleration
+
+Wan supports multiple acceleration techniques, including:
+
+* **Efficient attention implementations**: If any of these attention implementations are installed in your Python environment, they will be automatically enabled in the following priority:
+ * [Flash Attention 3](https://github.com/Dao-AILab/flash-attention)
+ * [Flash Attention 2](https://github.com/Dao-AILab/flash-attention)
+ * [Sage Attention](https://github.com/thu-ml/SageAttention)
+ * [torch SDPA](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) (default setting; we recommend installing `torch>=2.5.0`)
+* **Unified Sequence Parallel**: Sequence parallelism based on [xDiT](https://github.com/xdit-project/xDiT). Please refer to [this example](./acceleration/unified_sequence_parallel.py), and run it using the command:
+
+```shell
+pip install "xfuser[flash-attn]>=0.4.3"
+torchrun --standalone --nproc_per_node=8 examples/wanvideo/acceleration/unified_sequence_parallel.py
+```
+
+* **TeaCache**: Acceleration technique [TeaCache](https://github.com/ali-vilab/TeaCache). Please refer to [this example](./acceleration/teacache.py).
+
+
+
+
+
+
+### Input Parameters
+
+The pipeline accepts the following input parameters during inference:
+
+* `prompt`: Prompt describing the content to appear in the video.
+* `negative_prompt`: Negative prompt describing content that should not appear in the video. Default is `""`.
+* `input_image`: Input image, applicable for image-to-video models such as [`Wan-AI/Wan2.1-I2V-14B-480P`](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P) and [`PAI/Wan2.1-Fun-1.3B-InP`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP), as well as first-and-last-frame models like [`Wan-AI/Wan2.1-FLF2V-14B-720P`](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P).
+* `end_image`: End frame, applicable for first-and-last-frame models such as [`Wan-AI/Wan2.1-FLF2V-14B-720P`](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P).
+* `input_video`: Input video used for video-to-video generation. Applicable to any Wan series model and must be used together with `denoising_strength`.
+* `denoising_strength`: Denoising strength in range [0, 1]. A smaller value results in a video closer to `input_video`.
+* `control_video`: Control video, applicable to Wan models with control capabilities such as [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control).
+* `reference_image`: Reference image, applicable to Wan models supporting reference images such as [`PAI/Wan2.1-Fun-V1.1-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control).
+* `camera_control_direction`: Camera control direction, optional values are "Left", "Right", "Up", "Down", "LeftUp", "LeftDown", "RightUp", "RightDown". Applicable to Camera-Control models, such as [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera).
+* `camera_control_speed`: Camera control speed. Applicable to Camera-Control models, such as [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera).
+* `camera_control_origin`: Origin coordinate of the camera control sequence. Please refer to the [original paper](https://arxiv.org/pdf/2404.02101) for proper configuration. Applicable to Camera-Control models, such as [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera).
+* `vace_video`: Input video for VACE models, applicable to the VACE series such as [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview).
+* `vace_video_mask`: Mask video for VACE models, applicable to the VACE series such as [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview).
+* `vace_reference_image`: Reference image for VACE models, applicable to the VACE series such as [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview).
+* `vace_scale`: Influence of the VACE model on the base model, default is 1. Higher values increase control strength but may lead to visual artifacts or breakdowns.
+* `seed`: Random seed. Default is `None`, meaning fully random.
+* `rand_device`: Device used to generate random Gaussian noise matrix. Default is `"cpu"`. When set to `"cuda"`, different GPUs may produce different generation results.
+* `height`: Frame height, default is 480. Must be a multiple of 16; if not, it will be rounded up.
+* `width`: Frame width, default is 832. Must be a multiple of 16; if not, it will be rounded up.
+* `num_frames`: Number of frames, default is 81. Must be a multiple of 4 plus 1; if not, it will be rounded up. The minimum is 1.
+* `cfg_scale`: Classifier-free guidance scale, default is 5. Higher values increase adherence to the prompt but may cause visual artifacts.
+* `cfg_merge`: Whether to merge both sides of classifier-free guidance for unified inference. Default is `False`. This parameter currently only works for basic text-to-video and image-to-video models.
+* `switch_DiT_boundary`: The time point for switching between DiT models. Default value is 0.875. This parameter only takes effect for mixed models with multiple DiTs, for example, [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B).
+* `num_inference_steps`: Number of inference steps, default is 50.
+* `sigma_shift`: Parameter from Rectified Flow theory, default is 5. Higher values make the model stay longer at the initial denoising stage. Increasing this may improve video quality but may also cause inconsistency between generated videos and training data due to deviation from training behavior.
+* `motion_bucket_id`: Motion intensity, range [0, 100], applicable to motion control modules such as [`DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1`](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1). Larger values indicate more intense motion.
+* `tiled`: Whether to enable tiled VAE inference, default is `False`. Setting to `True` significantly reduces VRAM usage during VAE encoding/decoding but introduces small errors and slightly increases inference time.
+* `tile_size`: Tile size during VAE encoding/decoding, default is (30, 52), only effective when `tiled=True`.
+* `tile_stride`: Stride of tiles during VAE encoding/decoding, default is (15, 26), only effective when `tiled=True`. Must be less than or equal to `tile_size`.
+* `sliding_window_size`: Sliding window size for DiT part. Experimental feature, effects are unstable.
+* `sliding_window_stride`: Sliding window stride for DiT part. Experimental feature, effects are unstable.
+* `tea_cache_l1_thresh`: Threshold for TeaCache. Larger values result in faster speed but lower quality. Note that after enabling TeaCache, the inference speed is not uniform, so the remaining time shown on the progress bar becomes inaccurate.
+* `tea_cache_model_id`: TeaCache parameter template, options include `"Wan2.1-T2V-1.3B"`, `"Wan2.1-T2V-14B"`, `"Wan2.1-I2V-14B-480P"`, `"Wan2.1-I2V-14B-720P"`.
+* `progress_bar_cmd`: Progress bar implementation, default is `tqdm.tqdm`. You can set it to `lambda x:x` to disable the progress bar.
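+
+As an illustration of how these parameters fit together, the sketch below reuses the `pipe` and `save_video` objects from the Quick Start example; the prompt and the specific values are arbitrary examples:
+
+```python
+video = pipe(
+    prompt="a dog is running on the grass, sunny day, documentary style",
+    negative_prompt="blurry, low quality, distorted",
+    seed=42,                                 # fixed seed for reproducible results
+    height=480, width=832,                   # rounded up to multiples of 16 if needed
+    num_frames=81,                           # must be a multiple of 4 plus 1
+    cfg_scale=5, num_inference_steps=50, sigma_shift=5,
+    tiled=True, tile_size=(30, 52), tile_stride=(15, 26),  # tiled VAE to reduce VRAM usage
+)
+save_video(video, "video2.mp4", fps=15, quality=5)
+```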
+
+
+
+## Model Training
+
+Wan series models are trained using a unified script located at [`./model_training/train.py`](./model_training/train.py).
+
+
+
+### Script Parameters
+
+The script includes the following parameters:
+
+* Dataset
+ * `--dataset_base_path`: Base path of the dataset.
+ * `--dataset_metadata_path`: Path to the metadata file of the dataset.
+ * `--height`: Height of images or videos. Leave `height` and `width` empty to enable dynamic resolution.
+ * `--width`: Width of images or videos. Leave `height` and `width` empty to enable dynamic resolution.
+ * `--num_frames`: Number of frames per video. Frames are sampled from the video prefix.
+ * `--data_file_keys`: Data file keys in the metadata. Comma-separated.
+ * `--dataset_repeat`: Number of times to repeat the dataset per epoch.
+ * `--dataset_num_workers`: Number of workers for data loading.
+* Models
+ * `--model_paths`: Paths to load models. In JSON format.
+ * `--model_id_with_origin_paths`: Model ID with origin paths, e.g., Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors. Comma-separated.
+ * `--max_timestep_boundary`: Maximum value of the timestep interval, ranging from 0 to 1. Default is 1. This needs to be manually set only when training mixed models with multiple DiTs, for example, [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B).
+ * `--min_timestep_boundary`: Minimum value of the timestep interval, ranging from 0 to 1. Default is 0. This needs to be manually set only when training mixed models with multiple DiTs, for example, [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B).
+* Training
+ * `--learning_rate`: Learning rate.
+ * `--weight_decay`: Weight decay.
+ * `--num_epochs`: Number of epochs.
+ * `--output_path`: Output save path.
+ * `--remove_prefix_in_ckpt`: Remove prefix in ckpt.
+ * `--save_steps`: Interval, in training steps, between checkpoint saves. If None, checkpoints are saved once per epoch.
+ * `--find_unused_parameters`: Whether to find unused parameters in DDP.
+* Trainable Modules
+ * `--trainable_models`: Models to train, e.g., dit, vae, text_encoder.
+ * `--lora_base_model`: Which model LoRA is added to.
+ * `--lora_target_modules`: Which layers LoRA is added to.
+ * `--lora_rank`: Rank of LoRA.
+ * `--lora_checkpoint`: Path to the LoRA checkpoint. If provided, LoRA will be loaded from this checkpoint.
+* Extra Inputs
+ * `--extra_inputs`: Additional model inputs, comma-separated.
+* VRAM Management
+ * `--use_gradient_checkpointing_offload`: Whether to offload gradient checkpointing to CPU memory.
+
+Additionally, the training framework is built upon [`accelerate`](https://huggingface.co/docs/accelerate/index). Before starting training, run `accelerate config` to configure GPU-related parameters. For certain training scripts (e.g., full fine-tuning of 14B models), we provide recommended `accelerate` configuration files, which can be found in the corresponding training scripts.
+
+
+
+
+
+
+### Step 1: Prepare the Dataset
+
+The dataset consists of a series of files. We recommend organizing your dataset as follows:
+
+```
+data/example_video_dataset/
+├── metadata.csv
+├── video1.mp4
+└── video2.mp4
+```
+
+Here, `video1.mp4` and `video2.mp4` are training video files, and `metadata.csv` is the metadata list, for example:
+
+```
+video,prompt
+video1.mp4,"from sunset to night, a small town, light, house, river"
+video2.mp4,"a dog is running"
+```
+
+We have prepared a sample video dataset to help you test. You can download it using the following command:
+
+```shell
+modelscope download --dataset DiffSynth-Studio/example_video_dataset --local_dir ./data/example_video_dataset
+```
+
+The dataset supports mixed training of videos and images. Supported video formats include `"mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"`, and supported image formats include `"jpg", "jpeg", "png", "webp"`.
+
+The resolution of videos can be controlled via script parameters `--height`, `--width`, and `--num_frames`. For each video, the first `num_frames` frames will be used for training; therefore, an error will occur if the video length is less than `num_frames`. Image files will be treated as single-frame videos. When both `--height` and `--width` are left empty, dynamic resolution will be enabled, meaning training will use the actual resolution of each video or image in the dataset.
+
+**We strongly recommend using fixed-resolution training and avoiding mixing images and videos in the same dataset due to load balancing issues in multi-GPU training.**
+
+When the model requires additional inputs, such as the `control_video` needed by control-capable models like [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control), please add corresponding columns in the metadata file, for example:
+
+```
+video,prompt,control_video
+video1.mp4,"from sunset to night, a small town, light, house, river",video1_softedge.mp4
+```
+
+If additional inputs contain video or image files, their column names need to be specified in the `--data_file_keys` parameter. The default value of this parameter is `"image,video"`, meaning it parses columns named `image` and `video`. You can extend this list based on the additional input requirements, for example: `--data_file_keys "image,video,control_video"`.
+
+
+
+
+
+
+### Step 2: Load the Model
+
+Similar to the model loading logic during inference, you can configure the model to be loaded directly via its model ID. For instance, during inference we load the model using:
+
+```python
+model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth"),
+]
+```
+
+During training, simply use the following parameter to load the corresponding model:
+
+```shell
+--model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth"
+```
+
+If you want to load the model from local files, for example during inference:
+
+```python
+model_configs=[
+ ModelConfig(path=[
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
+ ]),
+ ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth"),
+]
+```
+
+Then during training, set the parameter as:
+
+```shell
+--model_paths '[
+ [
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors"
+ ],
+ "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth",
+ "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth"
+]' \
+```
+
+
+
+
+
+
+### Step 3: Configure Trainable Modules
+
+The training framework supports full fine-tuning of base models or LoRA-based training. Here are some examples:
+
+* Full fine-tuning of the DiT module: `--trainable_models dit`
+* Training a LoRA model for the DiT module: `--lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32`
+* Training both a LoRA model for DiT and the Motion Controller (yes, you can train such advanced structures): `--trainable_models motion_controller --lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32`
+
+Additionally, since multiple modules (text encoder, dit, vae) are loaded in the training script, you need to remove prefixes when saving model files. For example, when fully fine-tuning the DiT module or training a LoRA version of DiT, please set `--remove_prefix_in_ckpt pipe.dit.`
+
+
+
+
+
+
+### Step 4: Launch the Training Process
+
+We have prepared training commands for each model. Please refer to the table at the beginning of this document.
+
+Note that full fine-tuning of the 14B model requires 8 GPUs, each with at least 80GB VRAM. During full fine-tuning of these 14B models, you must install `deepspeed` (`pip install deepspeed`). We have provided recommended [configuration files](./model_training/full/accelerate_config_14B.yaml), which will be loaded automatically in the corresponding training scripts. These scripts have been tested on 8*A100.
+
+The default video size in the training script is `480*832*81` (height, width, and number of frames). Increasing the resolution may cause out-of-memory errors. To reduce VRAM usage, add the parameter `--use_gradient_checkpointing_offload`.
+
+
+
+## Gallery
+
+1.3B text-to-video:
+
+https://github.com/user-attachments/assets/124397be-cd6a-4f29-a87c-e4c695aaabb8
+
+Put sunglasses on the dog (1.3B video-to-video):
+
+https://github.com/user-attachments/assets/272808d7-fbeb-4747-a6df-14a0860c75fb
+
+14B text-to-video:
+
+https://github.com/user-attachments/assets/3908bc64-d451-485a-8b61-28f6d32dd92f
+
+14B image-to-video:
+
+https://github.com/user-attachments/assets/c0bdd5ca-292f-45ed-b9bc-afe193156e75
+
+LoRA training:
+
+https://github.com/user-attachments/assets/9bd8e30b-97e8-44f9-bb6f-da004ba376a9
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README_zh.md b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README_zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..57a36c7a813ee60164618872dde80c1f8c63c81d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/README_zh.md
@@ -0,0 +1,465 @@
+# 通义万相(Wan)
+
+[Switch to English](./README.md)
+
+Wan 是由阿里巴巴通义实验室开源的一系列视频生成模型。
+
+**DiffSynth-Studio 启用了新的推理和训练框架,如需使用旧版本,请点击[这里](https://github.com/modelscope/DiffSynth-Studio/tree/3edf3583b1f08944cee837b94d9f84d669c2729c)。**
+
+## 安装
+
+在使用本系列模型之前,请通过源码安装 DiffSynth-Studio。
+
+```shell
+git clone https://github.com/modelscope/DiffSynth-Studio.git
+cd DiffSynth-Studio
+pip install -e .
+```
+
+## 快速开始
+
+通过运行以下代码可以快速加载 [Wan-AI/Wan2.1-T2V-1.3B](https://www.modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B) 模型并进行推理
+
+```python
+import torch
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+```
+
+## 模型总览
+
+|模型 ID|额外参数|推理|全量训练|全量训练后验证|LoRA 训练|LoRA 训练后验证|
+|-|-|-|-|-|-|-|
+|[Wan-AI/Wan2.2-S2V-14B](https://www.modelscope.cn/models/Wan-AI/Wan2.2-S2V-14B)|`input_image`, `input_audio`, `audio_sample_rate`, `s2v_pose_video`|[code](./model_inference/Wan2.2-S2V-14B.py)|-|-|-|-|
+|[Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B)|`input_image`|[code](./model_inference/Wan2.2-I2V-A14B.py)|[code](./model_training/full/Wan2.2-I2V-A14B.sh)|[code](./model_training/validate_full/Wan2.2-I2V-A14B.py)|[code](./model_training/lora/Wan2.2-I2V-A14B.sh)|[code](./model_training/validate_lora/Wan2.2-I2V-A14B.py)|
+|[Wan-AI/Wan2.2-T2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-T2V-A14B)||[code](./model_inference/Wan2.2-T2V-A14B.py)|[code](./model_training/full/Wan2.2-T2V-A14B.sh)|[code](./model_training/validate_full/Wan2.2-T2V-A14B.py)|[code](./model_training/lora/Wan2.2-T2V-A14B.sh)|[code](./model_training/validate_lora/Wan2.2-T2V-A14B.py)|
+|[Wan-AI/Wan2.2-TI2V-5B](https://modelscope.cn/models/Wan-AI/Wan2.2-TI2V-5B)|`input_image`|[code](./model_inference/Wan2.2-TI2V-5B.py)|[code](./model_training/full/Wan2.2-TI2V-5B.sh)|[code](./model_training/validate_full/Wan2.2-TI2V-5B.py)|[code](./model_training/lora/Wan2.2-TI2V-5B.sh)|[code](./model_training/validate_lora/Wan2.2-TI2V-5B.py)|
+|[Wan-AI/Wan2.1-T2V-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-1.3B)||[code](./model_inference/Wan2.1-T2V-1.3B.py)|[code](./model_training/full/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-1.3B.py)|[code](./model_training/lora/Wan2.1-T2V-1.3B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-1.3B.py)|
+|[Wan-AI/Wan2.1-T2V-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-T2V-14B)||[code](./model_inference/Wan2.1-T2V-14B.py)|[code](./model_training/full/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_full/Wan2.1-T2V-14B.py)|[code](./model_training/lora/Wan2.1-T2V-14B.sh)|[code](./model_training/validate_lora/Wan2.1-T2V-14B.py)|
+|[Wan-AI/Wan2.1-I2V-14B-480P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-480P.py)|[code](./model_training/full/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-480P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-480P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-480P.py)|
+|[Wan-AI/Wan2.1-I2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-720P)|`input_image`|[code](./model_inference/Wan2.1-I2V-14B-720P.py)|[code](./model_training/full/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-I2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-I2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-I2V-14B-720P.py)|
+|[Wan-AI/Wan2.1-FLF2V-14B-720P](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/full/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_full/Wan2.1-FLF2V-14B-720P.py)|[code](./model_training/lora/Wan2.1-FLF2V-14B-720P.sh)|[code](./model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py)|
+|[PAI/Wan2.1-Fun-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py)|
+|[PAI/Wan2.1-Fun-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)|`control_video`|[code](./model_inference/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py)|
+|[PAI/Wan2.1-Fun-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-14B-InP.py)|[code](./model_training/full/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-14B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-InP.py)|
+|[PAI/Wan2.1-Fun-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-14B-Control)|`control_video`|[code](./model_inference/Wan2.1-Fun-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-14B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control)|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-Control](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control)|`control_video`, `reference_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-InP](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-InP)|`input_image`, `end_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-InP.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py)|
+|[PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera)|`control_camera_video`, `input_image`|[code](./model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py)|
+|[PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)|`control_camera_video`, `input_image`|[code](./model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|[code](./model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh)|[code](./model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|[code](./model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh)|[code](./model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py)|
+|[iic/VACE-Wan2.1-1.3B-Preview](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-1.3B-Preview.py)|[code](./model_training/full/Wan2.1-VACE-1.3B-Preview.sh)|[code](./model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py)|[code](./model_training/lora/Wan2.1-VACE-1.3B-Preview.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py)|
+|[Wan-AI/Wan2.1-VACE-1.3B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-1.3B)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-1.3B.py)|[code](./model_training/full/Wan2.1-VACE-1.3B.sh)|[code](./model_training/validate_full/Wan2.1-VACE-1.3B.py)|[code](./model_training/lora/Wan2.1-VACE-1.3B.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-1.3B.py)|
+|[Wan-AI/Wan2.1-VACE-14B](https://modelscope.cn/models/Wan-AI/Wan2.1-VACE-14B)|`vace_control_video`, `vace_reference_image`|[code](./model_inference/Wan2.1-VACE-14B.py)|[code](./model_training/full/Wan2.1-VACE-14B.sh)|[code](./model_training/validate_full/Wan2.1-VACE-14B.py)|[code](./model_training/lora/Wan2.1-VACE-14B.sh)|[code](./model_training/validate_lora/Wan2.1-VACE-14B.py)|
+|[DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1)|`motion_bucket_id`|[code](./model_inference/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py)|[code](./model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh)|[code](./model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py)|
+
+## 模型推理
+
+以下部分将会帮助您理解我们的功能并编写推理代码。
+
+
+
+### 加载模型
+
+模型通过 `from_pretrained` 加载:
+
+```python
+import torch
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth"),
+ ],
+)
+```
+
+其中 `torch_dtype` 和 `device` 是计算精度和计算设备。`model_configs` 可通过多种方式配置模型路径:
+
+* 从[魔搭社区](https://modelscope.cn/)下载模型并加载。此时需要填写 `model_id` 和 `origin_file_pattern`,例如
+
+```python
+ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors")
+```
+
+* 从本地文件路径加载模型。此时需要填写 `path`,例如
+
+```python
+ModelConfig(path="models/Wan-AI/Wan2.1-T2V-1.3B/diffusion_pytorch_model.safetensors")
+```
+
+对于从多个文件加载的单一模型,使用列表即可,例如
+
+```python
+ModelConfig(path=[
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
+])
+```
+
+`ModelConfig` 提供了额外的参数用于控制模型加载时的行为:
+
+* `local_model_path`: 用于保存下载模型的路径,默认值为 `"./models"`。
+* `skip_download`: 是否跳过下载,默认值为 `False`。当您的网络无法访问[魔搭社区](https://modelscope.cn/)时,请手动下载必要的文件,并将其设置为 `True`。
+
+`from_pretrained` 提供了额外的参数用于控制模型加载时的行为:
+
+* `tokenizer_config`: Wan 模型的 tokenizer 路径,默认值为 `ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="google/*")`。
+* `redirect_common_files`: 是否重定向重复模型文件,默认值为 `True`。由于 Wan 系列模型包括多个基础模型,每个基础模型的 text encoder 等模块都是相同的,为避免重复下载,我们会对模型路径进行重定向。
+* `use_usp`: 是否启用 Unified Sequence Parallel,默认值为 `False`。用于多 GPU 并行推理。
+
+
+
+
+
+
+### 显存管理
+
+DiffSynth-Studio 为 Wan 模型提供了细粒度的显存管理,让模型能够在低显存设备上进行推理,可通过以下代码开启 offload 功能,在显存有限的设备上将部分模块 offload 到内存中。
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化功能也是支持的:
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_dtype=torch.float8_e4m3fn),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化和 offload 可同时开启:
+
+```python
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu", offload_dtype=torch.float8_e4m3fn),
+ ],
+)
+pipe.enable_vram_management()
+```
+
+FP8 量化能够大幅度减少显存占用,但不会加速,部分模型在 FP8 量化下会出现精度不足导致的画面模糊、撕裂、失真问题,请谨慎使用 FP8 量化。
+
+开启显存管理后,框架会自动根据设备上的剩余显存确定显存管理策略。`enable_vram_management` 函数提供了以下参数,用于手动控制显存管理策略:
+
+* `vram_limit`: 显存占用量限制(GB),默认占用设备上的剩余显存。注意这不是一个绝对限制,当设置的显存不足以支持模型进行推理,但实际可用显存足够时,将会以最小化显存占用的形式进行推理。将其设置为0时,将会实现理论最小显存占用。
+* `vram_buffer`: 显存缓冲区大小(GB),默认为 0.5GB。由于部分较大的神经网络层在 onload 阶段会不可控地占用更多显存,因此一个显存缓冲区是必要的,理论上的最优值为模型中最大的层所占的显存。
+* `num_persistent_param_in_dit`: DiT 模型中常驻显存的参数数量(个),默认为无限制。我们将会在未来删除这个参数,请不要依赖这个参数。
+
+
+
+
+
+
+### 推理加速
+
+Wan 支持多种加速方案,包括
+
+* 高效注意力机制实现:当您的 Python 环境中安装过这些注意力机制实现方案时,我们将会按照以下优先级自动启用。
+ * [Flash Attention 3](https://github.com/Dao-AILab/flash-attention)
+ * [Flash Attention 2](https://github.com/Dao-AILab/flash-attention)
+ * [Sage Attention](https://github.com/thu-ml/SageAttention)
+ * [torch SDPA](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) (默认设置,建议安装 `torch>=2.5.0`)
+* 统一序列并行:基于 [xDiT](https://github.com/xdit-project/xDiT) 实现的序列并行,请参考[示例代码](./acceleration/unified_sequence_parallel.py),使用以下命令运行:
+
+```shell
+pip install "xfuser[flash-attn]>=0.4.3"
+torchrun --standalone --nproc_per_node=8 examples/wanvideo/acceleration/unified_sequence_parallel.py
+```
+
+* TeaCache:加速技术 [TeaCache](https://github.com/ali-vilab/TeaCache),请参考[示例代码](./acceleration/teacache.py)。
+
+
+
+
+
+
+### 输入参数
+
+Pipeline 在推理阶段能够接收以下输入参数:
+
+* `prompt`: 提示词,描述画面中出现的内容。
+* `negative_prompt`: 负向提示词,描述画面中不应该出现的内容,默认值为 `""`。
+* `input_image`: 输入图片,适用于图生视频模型,例如 [`Wan-AI/Wan2.1-I2V-14B-480P`](https://modelscope.cn/models/Wan-AI/Wan2.1-I2V-14B-480P)、[`PAI/Wan2.1-Fun-1.3B-InP`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-InP),以及首尾帧模型,例如 [`Wan-AI/Wan2.1-FLF2V-14B-720P`](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)。
+* `end_image`: 结尾帧,适用于首尾帧模型,例如 [`Wan-AI/Wan2.1-FLF2V-14B-720P`](https://modelscope.cn/models/Wan-AI/Wan2.1-FLF2V-14B-720P)。
+* `input_video`: 输入视频,用于视频生视频,适用于任意 Wan 系列模型,需与参数 `denoising_strength` 配合使用。
+* `denoising_strength`: 去噪强度,范围为 [0, 1]。数值越小,生成的视频越接近 `input_video`。
+* `control_video`: 控制视频,适用于带控制能力的 Wan 系列模型,例如 [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control)。
+* `reference_image`: 参考图片,适用于带参考图能力的 Wan 系列模型,例如 [`PAI/Wan2.1-Fun-V1.1-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-1.3B-Control)。
+* `camera_control_direction`: 镜头控制方向,可选 "Left", "Right", "Up", "Down", "LeftUp", "LeftDown", "RightUp", "RightDown" 之一,适用于 Camera-Control 模型,例如 [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)。
+* `camera_control_speed`: 镜头控制速度,适用于 Camera-Control 模型,例如 [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)。
+* `camera_control_origin`: 镜头控制序列的原点坐标,请参考[原论文](https://arxiv.org/pdf/2404.02101)进行设置,适用于 Camera-Control 模型,例如 [PAI/Wan2.1-Fun-V1.1-14B-Control-Camera](https://www.modelscope.cn/models/PAI/Wan2.1-Fun-V1.1-14B-Control-Camera)。
+* `vace_video`: VACE 模型的输入视频,适用于 VACE 系列模型,例如 [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。
+* `vace_video_mask`: VACE 模型的 mask 视频,适用于 VACE 系列模型,例如 [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。
+* `vace_reference_image`: VACE 模型的参考图片,适用于 VACE 系列模型,例如 [`iic/VACE-Wan2.1-1.3B-Preview`](https://modelscope.cn/models/iic/VACE-Wan2.1-1.3B-Preview)。
+* `vace_scale`: VACE 模型对基础模型的影响程度,默认为1。数值越大,控制强度越高,但画面崩坏概率越大。
+* `seed`: 随机种子。默认为 `None`,即完全随机。
+* `rand_device`: 生成随机高斯噪声矩阵的计算设备,默认为 `"cpu"`。当设置为 `cuda` 时,在不同 GPU 上会导致不同的生成结果。
+* `height`: 帧高度,默认为 480。需设置为 16 的倍数,不满足时向上取整。
+* `width`: 帧宽度,默认为 832。需设置为 16 的倍数,不满足时向上取整。
+* `num_frames`: 帧数,默认为 81。需设置为 4 的倍数 + 1,不满足时向上取整,最小值为 1。
+* `cfg_scale`: Classifier-free guidance 机制的数值,默认为 5。数值越大,提示词的控制效果越强,但画面崩坏的概率越大。
+* `cfg_merge`: 是否合并 Classifier-free guidance 的两侧进行统一推理,默认为 `False`。该参数目前仅在基础的文生视频和图生视频模型上生效。
+* `switch_DiT_boundary`: 切换 DiT 模型的时间点,默认值为 0.875,仅对多 DiT 的混合模型生效,例如 [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B)。
+* `num_inference_steps`: 推理次数,默认值为 50。
+* `sigma_shift`: Rectified Flow 理论中的参数,默认为 5。数值越大,模型在去噪的开始阶段停留的步骤数越多,可适当调大这个参数来提高画面质量,但会因生成过程与训练过程不一致导致生成的视频内容与训练数据存在差异。
+* `motion_bucket_id`: 运动幅度,范围为 [0, 100]。适用于速度控制模块,例如 [`DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1`](https://modelscope.cn/models/DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1),数值越大,运动幅度越大。
+* `tiled`: 是否启用 VAE 分块推理,默认为 `False`。设置为 `True` 时可显著减少 VAE 编解码阶段的显存占用,会产生少许误差,以及少量推理时间延长。
+* `tile_size`: VAE 编解码阶段的分块大小,默认为 (30, 52),仅在 `tiled=True` 时生效。
+* `tile_stride`: VAE 编解码阶段的分块步长,默认为 (15, 26),仅在 `tiled=True` 时生效,需保证其数值小于或等于 `tile_size`。
+* `sliding_window_size`: DiT 部分的滑动窗口大小。实验性功能,效果不稳定。
+* `sliding_window_stride`: DiT 部分的滑动窗口步长。实验性功能,效果不稳定。
+* `tea_cache_l1_thresh`: TeaCache 的阈值,数值越大,速度越快,画面质量越差。请注意,开启 TeaCache 后推理速度并非均匀,因此进度条上显示的剩余时间将会变得不准确。
+* `tea_cache_model_id`: TeaCache 的参数模板,可选 `"Wan2.1-T2V-1.3B"`、`"Wan2.1-T2V-14B"`、`"Wan2.1-I2V-14B-480P"`、`"Wan2.1-I2V-14B-720P"` 之一。
+* `progress_bar_cmd`: 进度条,默认为 `tqdm.tqdm`。可通过设置为 `lambda x:x` 来屏蔽进度条。
+
+
+
+
+## 模型训练
+
+Wan 系列模型训练通过统一的 [`./model_training/train.py`](./model_training/train.py) 脚本进行。
+
+
+
+### 脚本参数
+
+脚本包含以下参数:
+
+* 数据集
+ * `--dataset_base_path`: 数据集的根路径。
+ * `--dataset_metadata_path`: 数据集的元数据文件路径。
+ * `--height`: 图像或视频的高度。将 `height` 和 `width` 留空以启用动态分辨率。
+ * `--width`: 图像或视频的宽度。将 `height` 和 `width` 留空以启用动态分辨率。
+ * `--num_frames`: 每个视频中的帧数。帧从视频前缀中采样。
+ * `--data_file_keys`: 元数据中的数据文件键。用逗号分隔。
+ * `--dataset_repeat`: 每个 epoch 中数据集重复的次数。
+ * `--dataset_num_workers`: 每个 DataLoader 的进程数量。
+* 模型
+ * `--model_paths`: 要加载的模型路径。JSON 格式。
+ * `--model_id_with_origin_paths`: 带原始路径的模型 ID,例如 Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors。用逗号分隔。
+ * `--max_timestep_boundary`: Timestep 区间最大值,范围为 0~1,默认为 1,仅在多 DiT 的混合模型训练中需要手动设置,例如 [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B)。
+ * `--min_timestep_boundary`: Timestep 区间最小值,范围为 0~1,默认为 0,仅在多 DiT 的混合模型训练中需要手动设置,例如 [Wan-AI/Wan2.2-I2V-A14B](https://modelscope.cn/models/Wan-AI/Wan2.2-I2V-A14B)。
+* 训练
+ * `--learning_rate`: 学习率。
+ * `--weight_decay`:权重衰减大小。
+ * `--num_epochs`: 轮数(Epoch)。
+ * `--output_path`: 保存路径。
+ * `--remove_prefix_in_ckpt`: 在 ckpt 中移除前缀。
+ * `--save_steps`: 保存模型的间隔 step 数量,如果设置为 None,则每个 epoch 保存一次。
+ * `--find_unused_parameters`: DDP 训练中是否存在未使用的参数。
+* 可训练模块
+ * `--trainable_models`: 可训练的模型,例如 dit、vae、text_encoder。
+ * `--lora_base_model`: LoRA 添加到哪个模型上。
+ * `--lora_target_modules`: LoRA 添加到哪一层上。
+ * `--lora_rank`: LoRA 的秩(Rank)。
+ * `--lora_checkpoint`: LoRA 检查点的路径。如果提供此路径,LoRA 将从此检查点加载。
+* 额外模型输入
+ * `--extra_inputs`: 额外的模型输入,以逗号分隔。
+* 显存管理
+ * `--use_gradient_checkpointing_offload`: 是否将 gradient checkpointing 卸载到内存中。
+
+此外,训练框架基于 [`accelerate`](https://huggingface.co/docs/accelerate/index) 构建,在开始训练前运行 `accelerate config` 可配置 GPU 的相关参数。对于部分模型训练(例如 14B 模型的全量训练)脚本,我们提供了建议的 `accelerate` 配置文件,可在对应的训练脚本中查看。
+
+
+
+
+
+
+### Step 1: 准备数据集
+
+数据集包含一系列文件,我们建议您这样组织数据集文件:
+
+```
+data/example_video_dataset/
+├── metadata.csv
+├── video1.mp4
+└── video2.mp4
+```
+
+其中 `video1.mp4`、`video2.mp4` 为训练用视频数据,`metadata.csv` 为元数据列表,例如
+
+```
+video,prompt
+video1.mp4,"from sunset to night, a small town, light, house, river"
+video2.mp4,"a dog is running"
+```
+
+我们构建了一个样例视频数据集,以方便您进行测试,通过以下命令可以下载这个数据集:
+
+```shell
+modelscope download --dataset DiffSynth-Studio/example_video_dataset --local_dir ./data/example_video_dataset
+```
+
+数据集支持视频和图片混合训练,支持的视频文件格式包括 `"mp4", "avi", "mov", "wmv", "mkv", "flv", "webm"`,支持的图片格式包括 `"jpg", "jpeg", "png", "webp"`。
+
+视频的尺寸可通过脚本参数 `--height`、`--width`、`--num_frames` 控制。在每个视频中,前 `num_frames` 帧会被用于训练,因此当视频长度不足 `num_frames` 帧时会报错,图片文件会被视为单帧视频。当 `--height` 和 `--width` 为空时将会开启动态分辨率,按照数据集中每个视频或图片的实际宽高训练。
+
+**我们强烈建议使用固定分辨率训练,并避免图像和视频混合训练,因为在多卡训练中存在负载均衡问题。**
+
+当模型需要额外输入时,例如具备控制能力的模型 [`PAI/Wan2.1-Fun-1.3B-Control`](https://modelscope.cn/models/PAI/Wan2.1-Fun-1.3B-Control) 所需的 `control_video`,请在数据集中补充相应的列,例如:
+
+```
+video,prompt,control_video
+video1.mp4,"from sunset to night, a small town, light, house, river",video1_softedge.mp4
+```
+
+额外输入若包含视频和图像文件,则需要在 `--data_file_keys` 参数中指定要解析的列名。该参数的默认值为 `"image,video"`,即解析列名为 `image` 和 `video` 的列。可根据额外输入增加相应的列名,例如 `--data_file_keys "image,video,control_video"`。
+
+
+
+
+
+
+### Step 2: 加载模型
+
+类似于推理时的模型加载逻辑,可直接通过模型 ID 配置要加载的模型。例如,推理时我们通过以下设置加载模型
+
+```python
+model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth"),
+]
+```
+
+那么在训练时,填入以下参数即可加载对应的模型。
+
+```shell
+--model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth"
+```
+
+如果您希望从本地文件加载模型,例如推理时
+
+```python
+model_configs=[
+ ModelConfig(path=[
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors",
+ ]),
+ ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(path="models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth"),
+]
+```
+
+then at training time set
+
+```shell
+--model_paths '[
+ [
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00001-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00002-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00003-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00004-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00005-of-00006.safetensors",
+ "models/Wan-AI/Wan2.1-T2V-14B/diffusion_pytorch_model-00006-of-00006.safetensors"
+ ],
+ "models/Wan-AI/Wan2.1-T2V-14B/models_t5_umt5-xxl-enc-bf16.pth",
+ "models/Wan-AI/Wan2.1-T2V-14B/Wan2.1_VAE.pth"
+]' \
+```
+
+
+
+
+
+
+Step 3: Set the Trainable Modules
+
+The training framework supports training the base models or LoRA models. Here are a few examples:
+
+* Full training of the DiT part: `--trainable_models dit`
+* Training a LoRA on the DiT part: `--lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32`
+* Training a LoRA on the DiT part together with the Motion Controller (yes, such fancy combinations are supported): `--trainable_models motion_controller --lora_base_model dit --lora_target_modules "q,k,v,o,ffn.0,ffn.2" --lora_rank 32`
+
+In addition, since the training script loads multiple modules (text encoder, dit, vae), prefixes need to be removed when saving the model file. For example, when fully training the DiT part or training a LoRA on the DiT part, set `--remove_prefix_in_ckpt pipe.dit.`
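+
+Conceptually, `--remove_prefix_in_ckpt pipe.dit.` strips that prefix from every parameter name before the checkpoint is written, roughly as in the following sketch (illustrative only, not the framework's actual implementation):
+
+```python
+def remove_prefix_in_ckpt(state_dict: dict, prefix: str = "pipe.dit.") -> dict:
+    # Keep only the parameters that belong to the selected module and drop the prefix,
+    # so the saved checkpoint can be loaded directly into the standalone DiT model.
+    return {name[len(prefix):]: tensor for name, tensor in state_dict.items() if name.startswith(prefix)}
+```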
+
+
+
+
+
+
+Step 4: Launch Training
+
+We provide a training command for each model; please refer to the table at the beginning of this document.
+
+Please note that full training of the 14B models requires 8 GPUs with at least 80 GB of VRAM each, and `deepspeed` must be installed (`pip install deepspeed`). We provide a recommended [configuration file](./model_training/full/accelerate_config_14B.yaml), which is loaded by the corresponding training scripts; these scripts have been tested on 8×A100.
+
+The default video size in the training scripts is `480*832*81`. Increasing the resolution may cause out-of-memory errors; in that case, add the `--use_gradient_checkpointing_offload` argument to reduce VRAM usage.
+
+
+
+## Showcase
+
+1.3B text-to-video:
+
+https://github.com/user-attachments/assets/124397be-cd6a-4f29-a87c-e4c695aaabb8
+
+Putting sunglasses on the dog (1.3B video-to-video):
+
+https://github.com/user-attachments/assets/272808d7-fbeb-4747-a6df-14a0860c75fb
+
+14B text-to-video:
+
+https://github.com/user-attachments/assets/3908bc64-d451-485a-8b61-28f6d32dd92f
+
+14B image-to-video:
+
+https://github.com/user-attachments/assets/c0bdd5ca-292f-45ed-b9bc-afe193156e75
+
+LoRA training:
+
+https://github.com/user-attachments/assets/9bd8e30b-97e8-44f9-bb6f-da004ba376a9
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/teacache.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/teacache.py
new file mode 100644
index 0000000000000000000000000000000000000000..b88656a8f9861160d89e305dde9bccbbd651bd8e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/teacache.py
@@ -0,0 +1,27 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ # TeaCache parameters
+ tea_cache_l1_thresh=0.05, # The larger this value is, the faster the speed, but the worse the visual quality.
+ tea_cache_model_id="Wan2.1-T2V-1.3B", # Choose one in (Wan2.1-T2V-1.3B, Wan2.1-T2V-14B, Wan2.1-I2V-14B-480P, Wan2.1-I2V-14B-720P).
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/unified_sequence_parallel.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/unified_sequence_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..44b580b1b24f21e6e988f89f71f41050ff0f8184
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/acceleration/unified_sequence_parallel.py
@@ -0,0 +1,27 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+import torch.distributed as dist
+
+
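+# use_usp=True enables unified sequence parallelism across GPUs, so this script expects a distributed launch
+# (e.g. "torchrun --nproc_per_node=<num_gpus> unified_sequence_parallel.py"; the exact launch command is assumed here).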
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ use_usp=True,
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+
+video = pipe(
+ prompt="一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+if dist.get_rank() == 0:
+ save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..6efdc65c371ed426d40b30c14f6118fb0c3ce831
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-1.3b-speedcontrol-v1.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
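+# motion_bucket_id controls the amount of motion: 0 yields slow/near-static motion and 100 yields fast motion
+# (inferred from the "video_slow"/"video_fast" outputs below).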
+# Text-to-video
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True,
+ motion_bucket_id=0
+)
+save_video(video, "video_slow.mp4", fps=15, quality=5)
+
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True,
+ motion_bucket_id=100
+)
+save_video(video, "video_fast.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..30613982e8a208617728064a11291284dfb91309
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-FLF2V-14B-720P.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/first_frame.jpeg", "data/examples/wan/last_frame.jpeg"]
+)
+
+# First and last frame to video
+video = pipe(
+ prompt="写实风格,一个女生手持枯萎的花站在花园中,镜头逐渐拉远,记录下花园的全貌。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=Image.open("data/examples/wan/first_frame.jpeg").resize((960, 960)),
+ end_image=Image.open("data/examples/wan/last_frame.jpeg").resize((960, 960)),
+ seed=0, tiled=True,
+ height=960, width=960, num_frames=33,
+ sigma_shift=16,
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..43374d28863613d925e75836037dd79d60184523
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-Control.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/control_video.mp4"
+)
+
+# Control video
+control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576)
+video = pipe(
+ prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=control_video, height=832, width=576, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..d921c0cb75e75083fba5ee44a07f2c70798ea09d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-1.3B-InP.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# First and last frame to video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True
+ # You can input `end_image=xxx` to control the last frame of the video.
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`.
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..db9e5c8a8aca923c93a681c53dcdaf44ad11f29f
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-Control.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/control_video.mp4"
+)
+
+# Control video
+control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576)
+video = pipe(
+ prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=control_video, height=832, width=576, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..af227cbc73fbc92811a0fcffcdea527a26eee6cd
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-14B-InP.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# First and last frame to video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True
+ # You can input `end_image=xxx` to control the last frame of the video.
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`.
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..002c34c00c755941a6301467e0e1530d7f9889ee
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
@@ -0,0 +1,44 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+input_image = Image.open("data/examples/wan/input_image.jpg")
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Left", camera_control_speed=0.01,
+)
+save_video(video, "video_left.mp4", fps=15, quality=5)
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Up", camera_control_speed=0.01,
+)
+save_video(video, "video_up.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f7e4c82517bb387a21f32e775f928b740cd966e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-Control.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/control_video.mp4", "data/examples/wan/reference_image_girl.png"]
+)
+
+# Control video
+control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576)
+reference_image = Image.open("data/examples/wan/reference_image_girl.png").resize((576, 832))
+video = pipe(
+ prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=control_video, reference_image=reference_image,
+ height=832, width=576, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2fc5604292b91263969589ad4dc3f395421b16d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-1.3B-InP.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# First and last frame to video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True
+ # You can input `end_image=xxx` to control the last frame of the video.
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`.
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e0c9a8883c1001796fdf105f4e8e958a86f5550
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control-Camera.py
@@ -0,0 +1,44 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+input_image = Image.open("data/examples/wan/input_image.jpg")
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Left", camera_control_speed=0.01,
+)
+save_video(video, "video_left.mp4", fps=15, quality=5)
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Up", camera_control_speed=0.01,
+)
+save_video(video, "video_up.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..78635ff70b84e9ddb5c18d9959a57df96287af56
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-Control.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/control_video.mp4", "data/examples/wan/reference_image_girl.png"]
+)
+
+# Control video
+control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576)
+reference_image = Image.open("data/examples/wan/reference_image_girl.png").resize((576, 832))
+video = pipe(
+ prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=control_video, reference_image=reference_image,
+ height=832, width=576, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..334e981f7135c04bbe5826171f8e0a667a581be5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-Fun-V1.1-14B-InP.py
@@ -0,0 +1,36 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# First and last frame to video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True
+ # You can input `end_image=xxx` to control the last frame of the video.
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`.
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb2e5b06d52e404a7db6b1b0f3de4e8351e4c942
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-480P.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# Image-to-video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb14d24f00b7c1e8d1af7fcc83f5cefb629463c5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-I2V-14B-720P.py
@@ -0,0 +1,35 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# Image-to-video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True,
+ height=720, width=1280,
+)
+save_video(video, "video.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-S2V-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-S2V-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..2495698f1258a0c50480e8f6e8505ff22bc7029a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-S2V-14B.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+import librosa
+from diffsynth import VideoData, save_video_with_audio
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/model.safetensors"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="Wan2.1_VAE.pth"),
+ ],
+ audio_processor_config=ModelConfig(model_id="Wan-AI/Wan2.2-S2V-14B", origin_file_pattern="wav2vec2-large-xlsr-53-english/"),
+)
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/example_video_dataset",
+ local_dir="./data/example_video_dataset",
+ allow_file_pattern=f"wans2v/*"
+)
+
+num_frames = 81 # 4n+1
+height = 448
+width = 832
+
+prompt = "a person is singing"
+negative_prompt = "画面模糊,最差质量,画面模糊,细节模糊不清,情绪激动剧烈,手快速抖动,字幕,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走"
+input_image = Image.open("data/example_video_dataset/wans2v/pose.png").convert("RGB").resize((width, height))
+# s2v audio input, recommend 16kHz sampling rate
+audio_path = 'data/example_video_dataset/wans2v/sing.MP3'
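+# NOTE: this example stops after preparing the inputs. Generating a video additionally requires loading the audio
+# (e.g. audio, sampling_rate = librosa.load(audio_path, sr=16000)), calling pipe(...) with the prompt, input_image
+# and audio inputs, and saving the result with save_video_with_audio; the exact pipeline arguments are not shown here.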
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..83e300b9b820530b243e13e996fe841a6f0e7dfb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-1.3B.py
@@ -0,0 +1,34 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+# Text-to-video
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+
+# Video-to-video
+video = VideoData("video1.mp4", height=480, width=832)
+video = pipe(
+ prompt="纪实摄影风格画面,一只活泼的小狗戴着黑色墨镜在绿茵茵的草地上迅速奔跑。小狗毛色棕黄,戴着黑色墨镜,两只耳朵立起,神情专注而欢快。阳光洒在它身上,使得毛发看上去格外柔软而闪亮。背景是一片开阔的草地,偶尔点缀着几朵野花,远处隐约可见蓝天和几片白云。透视感鲜明,捕捉小狗奔跑时的动感和四周草地的生机。中景侧面移动视角。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_video=video, denoising_strength=0.7,
+ seed=1, tiled=True
+)
+save_video(video, "video2.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..40cb02d2214f12d4795ad67fb531fd5953d421bf
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-T2V-14B.py
@@ -0,0 +1,24 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+# Text-to-video
+video = pipe(
+ prompt="一名宇航员身穿太空服,面朝镜头骑着一匹机械马在火星表面驰骋。红色的荒凉地表延伸至远方,点缀着巨大的陨石坑和奇特的岩石结构。机械马的步伐稳健,扬起微弱的尘埃,展现出未来科技与原始探索的完美结合。宇航员手持操控装置,目光坚定,仿佛正在开辟人类的新疆域。背景是深邃的宇宙和蔚蓝的地球,画面既科幻又充满希望,让人不禁畅想未来的星际生活。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c0242328e1c1557015dc420107b2671ca81c6a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B-Preview.py
@@ -0,0 +1,52 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/depth_video.mp4", "data/examples/wan/cat_fightning.jpg"]
+)
+
+# Depth video -> Video
+control_video = VideoData("data/examples/wan/depth_video.mp4", height=480, width=832)
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ seed=1, tiled=True
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+
+# Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video2.mp4", fps=15, quality=5)
+
+# Depth video + Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video3.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..59ca2d4dcbee69cfb35be3de744aea323347d3fa
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-1.3B.py
@@ -0,0 +1,53 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/depth_video.mp4", "data/examples/wan/cat_fightning.jpg"]
+)
+
+# Depth video -> Video
+control_video = VideoData("data/examples/wan/depth_video.mp4", height=480, width=832)
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ seed=1, tiled=True
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+
+# Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video2.mp4", fps=15, quality=5)
+
+# Depth video + Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video3.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..e504b2c9e7db78ebd67912b7cb5c324b94f05711
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.1-VACE-14B.py
@@ -0,0 +1,54 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+
+
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/depth_video.mp4", "data/examples/wan/cat_fightning.jpg"]
+)
+
+# Depth video -> Video
+control_video = VideoData("data/examples/wan/depth_video.mp4", height=480, width=832)
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ seed=1, tiled=True
+)
+save_video(video, "video1_14b.mp4", fps=15, quality=5)
+
+# Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video2_14b.mp4", fps=15, quality=5)
+
+# Depth video + Reference image -> Video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=control_video,
+ vace_reference_image=Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480)),
+ seed=1, tiled=True
+)
+save_video(video, "video3_14b.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..27cda27a319a004e328b50af646d1223744cd725
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control-Camera.py
@@ -0,0 +1,43 @@
+import torch
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from PIL import Image
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control-Camera", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control-Camera", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+input_image = Image.open("data/examples/wan/input_image.jpg")
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Left", camera_control_speed=0.01,
+)
+save_video(video, "video_left.mp4", fps=15, quality=5)
+
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ camera_control_direction="Up", camera_control_speed=0.01,
+)
+save_video(video, "video_up.mp4", fps=15, quality=5)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..29414225505422dd535dfe26aceb9d1e06faf41d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-Control.py
@@ -0,0 +1,35 @@
+import torch
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from PIL import Image
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/control_video.mp4", "data/examples/wan/reference_image_girl.png"]
+)
+
+# Control video
+control_video = VideoData("data/examples/wan/control_video.mp4", height=832, width=576)
+reference_image = Image.open("data/examples/wan/reference_image_girl.png").resize((576, 832))
+video = pipe(
+ prompt="扁平风格动漫,一位长发少女优雅起舞。她五官精致,大眼睛明亮有神,黑色长发柔顺光泽。身穿淡蓝色T恤和深蓝色牛仔短裤。背景是粉色。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=control_video, reference_image=reference_image,
+ height=832, width=576, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video.mp4", fps=15, quality=5)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..c63e522951a28caa66bd1141f2d133f51015e380
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-Fun-A14B-InP.py
@@ -0,0 +1,35 @@
+import torch
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from PIL import Image
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-InP", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-InP", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.2-Fun-A14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=f"data/examples/wan/input_image.jpg"
+)
+image = Image.open("data/examples/wan/input_image.jpg")
+
+# First and last frame to video
+video = pipe(
+ prompt="一艘小船正勇敢地乘风破浪前行。蔚蓝的大海波涛汹涌,白色的浪花拍打着船身,但小船毫不畏惧,坚定地驶向远方。阳光洒在水面上,闪烁着金色的光芒,为这壮丽的场景增添了一抹温暖。镜头拉近,可以看到船上的旗帜迎风飘扬,象征着不屈的精神与冒险的勇气。这段画面充满力量,激励人心,展现了面对挑战时的无畏与执着。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=image,
+ seed=0, tiled=True,
+ # You can input `end_image=xxx` to control the last frame of the video.
+ # The model will automatically generate the dynamic content between `input_image` and `end_image`.
+)
+save_video(video, "video.mp4", fps=15, quality=5)
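+
+# A minimal first-and-last-frame sketch, following the comment above. "end_image.jpg" is an
+# assumed local file (it is not part of the example dataset); replace it with your own last frame.
+end_image = Image.open("end_image.jpg")
+video = pipe(
+    prompt="A small boat sails bravely through the waves toward the horizon.",
+    negative_prompt="",
+    input_image=image,
+    end_image=end_image,  # the model generates the motion between input_image and end_image
+    seed=0, tiled=True,
+)
+save_video(video, "video_flf.mp4", fps=15, quality=5)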
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-I2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-I2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d795f515f2542ca27e527dcc4ab1413db09135d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-I2V-A14B.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/cat_fightning.jpg"]
+)
+input_image = Image.open("data/examples/wan/cat_fightning.jpg").resize((832, 480))
+
+video = pipe(
+ prompt="Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ input_image=input_image,
+ switch_DiT_boundary=0.9,  # timestep fraction at which generation hands off from the high-noise DiT to the low-noise DiT
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-T2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-T2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..27b10d0a8314cbfd85b802230828f33287f30f31
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-T2V-A14B.py
@@ -0,0 +1,24 @@
+import torch
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+# Text-to-video
+video = pipe(
+ prompt="Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-TI2V-5B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-TI2V-5B.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa919657a22b8701e1d393331c7b16dd17d9c6fb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_inference/Wan2.2-TI2V-5B.py
@@ -0,0 +1,43 @@
+import torch
+from PIL import Image
+from diffsynth import save_video
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="Wan2.2_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.enable_vram_management()
+
+# Text-to-video
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ height=704, width=1248,
+ num_frames=121,
+)
+save_video(video, "video1.mp4", fps=15, quality=5)
+
+# Image-to-video
+dataset_snapshot_download(
+ dataset_id="DiffSynth-Studio/examples_in_diffsynth",
+ local_dir="./",
+ allow_file_pattern=["data/examples/wan/cat_fightning.jpg"]
+)
+input_image = Image.open("data/examples/wan/cat_fightning.jpg").resize((1248, 704))
+video = pipe(
+ prompt="两只可爱的橘猫戴上拳击手套,站在一个拳击台上搏斗。",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=0, tiled=True,
+ height=704, width=1248,
+ input_image=input_image,
+ num_frames=121,
+)
+save_video(video, "video2.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3d580ab95b96c0b727dc2874d0178110a8eec85c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-1.3b-speedcontrol-v1.sh
@@ -0,0 +1,13 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_motion_bucket_id.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth,DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1:model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.motion_controller." \
+ --output_path "./models/train/Wan2.1-1.3b-speedcontrol-v1_full" \
+ --trainable_models "motion_controller" \
+ --extra_inputs "motion_bucket_id"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c9f6f641040a5b65bd983cf337b1fb93df702718
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-FLF2V-14B-720P.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-FLF2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-FLF2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-FLF2V-14B-720P_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..45a99ded5ad105c775b4ee641164fb21e2ba780f
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-Control.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_control.csv \
+ --data_file_keys "video,control_video" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-1.3B-Control_full" \
+ --trainable_models "dit" \
+ --extra_inputs "control_video"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a202bf9890797c719ec2e67dda93ea35b0273394
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-1.3B-InP.sh
@@ -0,0 +1,13 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-1.3B-InP_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8a17c3f1c6127ee21b01aa9a169bcb55bdd758f1
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-Control.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_control.csv \
+ --data_file_keys "video,control_video" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-14B-Control_full" \
+ --trainable_models "dit" \
+ --extra_inputs "control_video"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..86feae73cab10e414de5f3a2c6e400285495dfaa
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-14B-InP.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-14B-InP_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b59ed32c5c844e40eaa053f5ccb2f9e3f5af9f0b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
@@ -0,0 +1,13 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_camera_control.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control-Camera_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,camera_control_direction,camera_control_speed"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..34273c1ec50a8b4850858bfc712088812c6005e6
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-Control.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \
+ --data_file_keys "video,control_video,reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control_full" \
+ --trainable_models "dit" \
+ --extra_inputs "control_video,reference_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f6eed97db8ab61860549704d3238db7426073e77
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-1.3B-InP.sh
@@ -0,0 +1,13 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-InP_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
new file mode 100644
index 0000000000000000000000000000000000000000..41b87e990df97bff7521e81d418868d6f1d1e054
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_camera_control.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control-Camera_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,camera_control_direction,camera_control_speed"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ce6640e6334f5f81372ff435c7856864594949ad
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-Control.sh
@@ -0,0 +1,14 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \
+ --data_file_keys "video,control_video,reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control_full" \
+ --trainable_models "dit" \
+ --extra_inputs "control_video,reference_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..afb5d3dab59cebfc4af7628f63d90b107b87e522
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-Fun-V1.1-14B-InP.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-InP_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6d257ff55c58ca117d6b22bf84e61bcab664a2e0
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-480P.sh
@@ -0,0 +1,13 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-480P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-480P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-480P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-480P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-I2V-14B-480P_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bbb2870d21c85f3f6d37c6661d56dab216211e54
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-I2V-14B-720P.sh
@@ -0,0 +1,15 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 720 \
+ --width 1280 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-I2V-14B-720P_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e0d6e842ad12407fd5cbfc4bfa3c65618b2fa94c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-1.3B.sh
@@ -0,0 +1,12 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-T2V-1.3B_full" \
+ --trainable_models "dit"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ae804b0503cd0903e18b32bd11a18675d01a43dc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-T2V-14B.sh
@@ -0,0 +1,12 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-T2V-14B_full" \
+ --trainable_models "dit"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B-Preview.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B-Preview.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b348874f08e74f83d476444de143f4cfe4304dec
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B-Preview.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "iic/VACE-Wan2.1-1.3B-Preview:diffusion_pytorch_model*.safetensors,iic/VACE-Wan2.1-1.3B-Preview:models_t5_umt5-xxl-enc-bf16.pth,iic/VACE-Wan2.1-1.3B-Preview:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-1.3B-Preview_full" \
+ --trainable_models "vace" \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..763252e14ee78e74fe001157bfb136bac62577d9
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-1.3B.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-1.3B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-1.3B_full" \
+ --trainable_models "vace" \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c54926347b73cc5e96fce2fd8378161ea7cc1179
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.1-VACE-14B.sh
@@ -0,0 +1,16 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --num_frames 17 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-14B_full" \
+ --trainable_models "vace" \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-I2V-A14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-I2V-A14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..10fb02f2923d6c324aab9c3985a980661b45832a
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-I2V-A14B.sh
@@ -0,0 +1,37 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-I2V-A14B_high_noise_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image" \
+ --use_gradient_checkpointing_offload \
+ --max_timestep_boundary 0.358 \
+ --min_timestep_boundary 0
+# boundary corresponds to timesteps [900, 1000]
+
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-I2V-A14B_low_noise_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image" \
+ --use_gradient_checkpointing_offload \
+ --max_timestep_boundary 1 \
+ --min_timestep_boundary 0.358
+# boundary corresponds to timesteps [0, 900)
\ No newline at end of file
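The two launches above train the Wan2.2 MoE experts separately: the high-noise DiT on the noisiest timesteps (the comments give [900, 1000]) and the low-noise DiT on the remainder ([0, 900)); the Wan2.2-T2V-A14B script below follows the same pattern with a 0.417 boundary (timestep 875). A minimal sketch of the split in terms of the integer timesteps quoted in the comments (the --max/min_timestep_boundary flags express the same cut as fractions, handled inside the training code):

# Illustration only: route a sampled timestep to the expert that owns it.
def pick_expert(timestep: int, boundary: int = 900) -> str:
    # High-noise expert covers [boundary, 1000]; low-noise expert covers [0, boundary).
    return "high_noise" if timestep >= boundary else "low_noise"

assert pick_expert(950) == "high_noise"  # trained by the first launch
assert pick_expert(300) == "low_noise"   # trained by the second launch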
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-T2V-A14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-T2V-A14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..89c070429723a70e15d83acdc447a097e131abca
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-T2V-A14B.sh
@@ -0,0 +1,33 @@
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-T2V-A14B_high_noise_full" \
+ --trainable_models "dit" \
+ --max_timestep_boundary 0.417 \
+ --min_timestep_boundary 0
+# boundary corresponds to timesteps [875, 1000]
+
+accelerate launch --config_file examples/wanvideo/model_training/full/accelerate_config_14B.yaml examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-T2V-A14B_low_noise_full" \
+ --trainable_models "dit" \
+ --max_timestep_boundary 1 \
+ --min_timestep_boundary 0.417
+# boundary corresponds to timesteps [0, 875)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-TI2V-5B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-TI2V-5B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..def9f897a558dc9a4f97b1d98adff11201482986
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/Wan2.2-TI2V-5B.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-TI2V-5B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-TI2V-5B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-TI2V-5B:Wan2.2_VAE.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 2 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-TI2V-5B_full" \
+ --trainable_models "dit" \
+ --extra_inputs "input_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/accelerate_config_14B.yaml b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/accelerate_config_14B.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3875a9da2354631046baf19e61a9d5ab1d8d6aca
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/accelerate_config_14B.yaml
@@ -0,0 +1,22 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+deepspeed_config:
+ gradient_accumulation_steps: 1
+ offload_optimizer_device: cpu
+ offload_param_device: cpu
+ zero3_init_flag: false
+ zero_stage: 2
+distributed_type: DEEPSPEED
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/run_test.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/run_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..093becdec3c721f1e3ddb401bf52507a5c7646d9
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/full/run_test.py
@@ -0,0 +1,38 @@
+import multiprocessing, os
+
+
+def run_task(scripts, thread_id, thread_num):
+    # Each worker handles every `thread_num`-th script, pinned to GPU `thread_id` via
+    # CUDA_VISIBLE_DEVICES, and redirects the script's output to a log file under data/log/.
+ for script_id, script in enumerate(scripts):
+ if script_id % thread_num == thread_id:
+ log_file_name = script.replace("/", "_") + ".txt"
+ cmd = f"CUDA_VISIBLE_DEVICES={thread_id} bash {script} > data/log/{log_file_name} 2>&1"
+ os.makedirs("data/log", exist_ok=True)
+ print(cmd, flush=True)
+ os.system(cmd)
+
+
+if __name__ == "__main__":
+    # Run from the repository root so the relative `examples/...` and `data/log` paths resolve.
+    # 1.3B scripts: run in parallel across 8 worker processes, one visible GPU each.
+ scripts = []
+ for file_name in os.listdir("examples/wanvideo/model_training/full"):
+ if file_name != "run_test.py" and "14B" not in file_name:
+ scripts.append(os.path.join("examples/wanvideo/model_training/full", file_name))
+
+ processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)]
+ for p in processes:
+ p.start()
+ for p in processes:
+ p.join()
+
+    # 14B scripts: run sequentially; each launch uses all 8 GPUs via its accelerate config.
+ scripts = []
+ for file_name in os.listdir("examples/wanvideo/model_training/full"):
+ if file_name != "run_test.py" and "14B" in file_name:
+ scripts.append(os.path.join("examples/wanvideo/model_training/full", file_name))
+ for script in scripts:
+ log_file_name = script.replace("/", "_") + ".txt"
+ cmd = f"bash {script} > data/log/{log_file_name} 2>&1"
+ print(cmd, flush=True)
+ os.system(cmd)
+
+ print("Done!")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh
new file mode 100644
index 0000000000000000000000000000000000000000..51ebfe45730f74c5bcfaaaf46bc3adb476d1b563
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-1.3b-speedcontrol-v1.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_motion_bucket_id.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth,DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1:model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-1.3b-speedcontrol-v1_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "motion_bucket_id"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9a9622d55091685172ee2928e0842cc76018be4e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-FLF2V-14B-720P.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-FLF2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-FLF2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-FLF2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-FLF2V-14B-720P_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..03c1f4517b7af9dbc65abe515af20f59c71067a4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-Control.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_control.csv \
+ --data_file_keys "video,control_video" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-1.3B-Control_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "control_video"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d5f509be6956fe99291529b8eb3d737102302982
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-1.3B-InP.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-1.3B-InP_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..608df5ff2010ca89ca9ae6bec075ab658aed799f
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-Control.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_control.csv \
+ --data_file_keys "video,control_video" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-14B-Control_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "control_video"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..37b251812c20c4012a63dde8d98b44b53b9bb1b9
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-14B-InP.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-14B-InP_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2f809a477b7ba85f517c777060fa53efc977f711
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_camera_control.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control-Camera_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,camera_control_direction,camera_control_speed"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1e7156df80d6f4a4a0dd752081c5feb6d465713c
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-Control.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \
+ --data_file_keys "video,control_video,reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-Control_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "control_video,reference_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5879f59b57104067bb3efc77ebcbf8bf28f54df5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-1.3B-InP.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-1.3B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-1.3B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-1.3B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-1.3B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-1.3B-InP_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
new file mode 100644
index 0000000000000000000000000000000000000000..176a05fb419cc4dee753d5d282feeb6c0fa6f280
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control-Camera.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_camera_control.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control-Camera:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-5 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control-Camera_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,camera_control_direction,camera_control_speed"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3ead12ce7f9a6da33c08a4c15ccf2727ae4ce3ff
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-Control.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_reference_control.csv \
+ --data_file_keys "video,control_video,reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-Control:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-Control:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-Control:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-Control_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "control_video,reference_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh
new file mode 100644
index 0000000000000000000000000000000000000000..40a8ad07950173d1b43b3595770fa4e294d44853
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-Fun-V1.1-14B-InP.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "PAI/Wan2.1-Fun-V1.1-14B-InP:diffusion_pytorch_model*.safetensors,PAI/Wan2.1-Fun-V1.1-14B-InP:models_t5_umt5-xxl-enc-bf16.pth,PAI/Wan2.1-Fun-V1.1-14B-InP:Wan2.1_VAE.pth,PAI/Wan2.1-Fun-V1.1-14B-InP:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-Fun-V1.1-14B-InP_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image,end_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..473d51981702868874720056795b96bf1fe337c7
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-480P.sh
@@ -0,0 +1,15 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-480P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-480P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-480P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-480P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-I2V-14B-480P_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ec987a8b71bf119f582839d8b603450f45b03296
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-I2V-14B-720P.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 720 \
+ --width 1280 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-I2V-14B-720P:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-I2V-14B-720P_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d16a287193286ac4893878325b1a9b6076eab627
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-1.3B.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-1.3B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-T2V-1.3B_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1fb55ac3467dca84420013e999b2bcc4e8d689d8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-T2V-14B.sh
@@ -0,0 +1,14 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-T2V-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-T2V-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-T2V-14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.1-T2V-14B_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B-Preview.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B-Preview.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2bcb55b9b6c1e4b02941a85f9fecc2f9ca470698
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B-Preview.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "iic/VACE-Wan2.1-1.3B-Preview:diffusion_pytorch_model*.safetensors,iic/VACE-Wan2.1-1.3B-Preview:models_t5_umt5-xxl-enc-bf16.pth,iic/VACE-Wan2.1-1.3B-Preview:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-1.3B-Preview_lora" \
+ --lora_base_model "vace" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b56507889bd957563e874c64f2c983c09db4eeb8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-1.3B.sh
@@ -0,0 +1,17 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-1.3B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-1.3B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-1.3B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-1.3B_lora" \
+ --lora_base_model "vace" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..633ea0e305b102a5b388670507429bdea869e3c5
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.1-VACE-14B.sh
@@ -0,0 +1,18 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata_vace.csv \
+ --data_file_keys "video,vace_video,vace_reference_image" \
+ --height 480 \
+ --width 832 \
+ --num_frames 17 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.1-VACE-14B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-VACE-14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-VACE-14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.vace." \
+ --output_path "./models/train/Wan2.1-VACE-14B_lora" \
+ --lora_base_model "vace" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "vace_video,vace_reference_image" \
+ --use_gradient_checkpointing_offload
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-I2V-A14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-I2V-A14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1d9eba0aadd1f2c11d7ff6db199f1ad1e2536b59
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-I2V-A14B.sh
@@ -0,0 +1,39 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-I2V-A14B_high_noise_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image" \
+ --max_timestep_boundary 0.358 \
+ --min_timestep_boundary 0
+# boundary corresponds to timesteps [900, 1000]
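+# The run above trains the high-noise expert on the upper portion of the noise schedule;
+# the run below trains the low-noise expert on the remaining timesteps.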
+
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-I2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-I2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-I2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-I2V-A14B_low_noise_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image" \
+ --max_timestep_boundary 1 \
+ --min_timestep_boundary 0.358
+# boundary corresponds to timesteps [0, 900)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-T2V-A14B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-T2V-A14B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f47c96b7197a99c3e1332c5a858f0a9755130ead
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-T2V-A14B.sh
@@ -0,0 +1,38 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:high_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-T2V-A14B_high_noise_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --max_timestep_boundary 0.417 \
+ --min_timestep_boundary 0
+# boundary corresponds to timesteps [875, 1000]
+
+
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-T2V-A14B:low_noise_model/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-T2V-A14B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-T2V-A14B:Wan2.1_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-T2V-A14B_low_noise_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --max_timestep_boundary 1 \
+ --min_timestep_boundary 0.417
+# boundary corresponds to timesteps [0, 875)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-TI2V-5B.sh b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-TI2V-5B.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6a33b5799e71f6fc68cc00386ff28b1368e2673b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/Wan2.2-TI2V-5B.sh
@@ -0,0 +1,16 @@
+accelerate launch examples/wanvideo/model_training/train.py \
+ --dataset_base_path data/example_video_dataset \
+ --dataset_metadata_path data/example_video_dataset/metadata.csv \
+ --height 480 \
+ --width 832 \
+ --num_frames 49 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "Wan-AI/Wan2.2-TI2V-5B:diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.2-TI2V-5B:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.2-TI2V-5B:Wan2.2_VAE.pth" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Wan2.2-TI2V-5B_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
+ --lora_rank 32 \
+ --extra_inputs "input_image"
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/run_test.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/run_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec0f9e2ddcf86e38c735d36122a7ca417eddd6b4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/lora/run_test.py
@@ -0,0 +1,25 @@
+import multiprocessing, os
+
+
+def run_task(scripts, thread_id, thread_num):
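+    # Scripts are assigned round-robin: worker thread_id runs every thread_num-th script,
+    # pinned to GPU thread_id via CUDA_VISIBLE_DEVICES.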
+ for script_id, script in enumerate(scripts):
+ if script_id % thread_num == thread_id:
+ log_file_name = script.replace("/", "_") + ".txt"
+ cmd = f"CUDA_VISIBLE_DEVICES={thread_id} bash {script} > data/log/{log_file_name} 2>&1"
+ os.makedirs("data/log", exist_ok=True)
+ print(cmd, flush=True)
+ os.system(cmd)
+
+
+if __name__ == "__main__":
+ scripts = []
+ for file_name in os.listdir("examples/wanvideo/model_training/lora"):
+ if file_name != "run_test.py":
+ scripts.append(os.path.join("examples/wanvideo/model_training/lora", file_name))
+
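+    # Launch 8 worker processes, one per GPU, and wait for all of them to finish.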
+ processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)]
+ for p in processes:
+ p.start()
+ for p in processes:
+ p.join()
+ print("Done!")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/train.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2f437e9e9be5851501a2664c8f8be862b0597de
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/train.py
@@ -0,0 +1,142 @@
+import torch, os, json
+from diffsynth import load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from diffsynth.trainers.utils import DiffusionTrainingModule, VideoDataset, ModelLogger, launch_training_task, wan_parser
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+
+class WanTrainingModule(DiffusionTrainingModule):
+ def __init__(
+ self,
+ model_paths=None, model_id_with_origin_paths=None,
+ trainable_models=None,
+ lora_base_model=None, lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32, lora_checkpoint=None,
+ use_gradient_checkpointing=True,
+ use_gradient_checkpointing_offload=False,
+ extra_inputs=None,
+ max_timestep_boundary=1.0,
+ min_timestep_boundary=0.0,
+ ):
+ super().__init__()
+ # Load models
+ model_configs = []
+ if model_paths is not None:
+ model_paths = json.loads(model_paths)
+ model_configs += [ModelConfig(path=path) for path in model_paths]
+ if model_id_with_origin_paths is not None:
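+            # Each entry has the form "model_id:origin_file_pattern"; entries are comma-separated.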
+ model_id_with_origin_paths = model_id_with_origin_paths.split(",")
+ model_configs += [ModelConfig(model_id=i.split(":")[0], origin_file_pattern=i.split(":")[1]) for i in model_id_with_origin_paths]
+ self.pipe = WanVideoPipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs)
+
+ # Reset training scheduler
+ self.pipe.scheduler.set_timesteps(1000, training=True)
+
+ # Freeze untrainable models
+ self.pipe.freeze_except([] if trainable_models is None else trainable_models.split(","))
+
+ # Add LoRA to the base models
+ if lora_base_model is not None:
+ model = self.add_lora_to_model(
+ getattr(self.pipe, lora_base_model),
+ target_modules=lora_target_modules.split(","),
+ lora_rank=lora_rank
+ )
+ if lora_checkpoint is not None:
+ state_dict = load_state_dict(lora_checkpoint)
+ state_dict = self.mapping_lora_state_dict(state_dict)
+ load_result = model.load_state_dict(state_dict, strict=False)
+ print(f"LoRA checkpoint loaded: {lora_checkpoint}, total {len(state_dict)} keys")
+ if len(load_result[1]) > 0:
+ print(f"Warning, LoRA key mismatch! Unexpected keys in LoRA checkpoint: {load_result[1]}")
+ setattr(self.pipe, lora_base_model, model)
+
+ # Store other configs
+ self.use_gradient_checkpointing = use_gradient_checkpointing
+ self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload
+ self.extra_inputs = extra_inputs.split(",") if extra_inputs is not None else []
+ self.max_timestep_boundary = max_timestep_boundary
+ self.min_timestep_boundary = min_timestep_boundary
+
+
+ def forward_preprocess(self, data):
+ # CFG-sensitive parameters
+ inputs_posi = {"prompt": data["prompt"]}
+ inputs_nega = {}
+
+        # CFG-insensitive parameters
+ inputs_shared = {
+            # Fill in the input parameters below
+            # as if you were running this pipeline for inference.
+ "input_video": data["video"],
+ "height": data["video"][0].size[1],
+ "width": data["video"][0].size[0],
+ "num_frames": len(data["video"]),
+            # Please do not modify the following parameters
+            # unless you clearly understand the consequences.
+ "cfg_scale": 1,
+ "tiled": False,
+ "rand_device": self.pipe.device,
+ "use_gradient_checkpointing": self.use_gradient_checkpointing,
+ "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload,
+ "cfg_merge": False,
+ "vace_scale": 1,
+ "max_timestep_boundary": self.max_timestep_boundary,
+ "min_timestep_boundary": self.min_timestep_boundary,
+ }
+
+ # Extra inputs
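+        # Conditioning inputs are derived from the training sample: the first frame
+        # becomes input_image, the last frame becomes end_image, reference images take
+        # the first frame of their clip, and any other extra input is passed through unchanged.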
+ for extra_input in self.extra_inputs:
+ if extra_input == "input_image":
+ inputs_shared["input_image"] = data["video"][0]
+ elif extra_input == "end_image":
+ inputs_shared["end_image"] = data["video"][-1]
+ elif extra_input == "reference_image" or extra_input == "vace_reference_image":
+ inputs_shared[extra_input] = data[extra_input][0]
+ else:
+ inputs_shared[extra_input] = data[extra_input]
+
+ # Pipeline units will automatically process the input parameters.
+ for unit in self.pipe.units:
+ inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega)
+ return {**inputs_shared, **inputs_posi}
+
+
+ def forward(self, data, inputs=None):
+ if inputs is None: inputs = self.forward_preprocess(data)
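+        # Gather the models used inside the denoising iteration and compute the diffusion training loss.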
+ models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models}
+ loss = self.pipe.training_loss(**models, **inputs)
+ return loss
+
+
+if __name__ == "__main__":
+ parser = wan_parser()
+ args = parser.parse_args()
+ dataset = VideoDataset(args=args)
+ model = WanTrainingModule(
+ model_paths=args.model_paths,
+ model_id_with_origin_paths=args.model_id_with_origin_paths,
+ trainable_models=args.trainable_models,
+ lora_base_model=args.lora_base_model,
+ lora_target_modules=args.lora_target_modules,
+ lora_rank=args.lora_rank,
+ lora_checkpoint=args.lora_checkpoint,
+ use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload,
+ extra_inputs=args.extra_inputs,
+ max_timestep_boundary=args.max_timestep_boundary,
+ min_timestep_boundary=args.min_timestep_boundary,
+ )
+ model_logger = ModelLogger(
+ args.output_path,
+ remove_prefix_in_ckpt=args.remove_prefix_in_ckpt
+ )
+ optimizer = torch.optim.AdamW(model.trainable_modules(), lr=args.learning_rate, weight_decay=args.weight_decay)
+ scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
+ launch_training_task(
+ dataset, model, model_logger, optimizer, scheduler,
+ num_epochs=args.num_epochs,
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
+ save_steps=args.save_steps,
+ find_unused_parameters=args.find_unused_parameters,
+ num_workers=args.dataset_num_workers,
+ )
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..124749a8ebe2e5d87fff6aadcf1872988b487c29
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-1.3b-speedcontrol-v1.py
@@ -0,0 +1,28 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-1.3b-speedcontrol-v1_full/epoch-1.safetensors")
+pipe.motion_controller.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+# Text-to-video with motion speed control
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True,
+ motion_bucket_id=50
+)
+save_video(video, "video_Wan2.1-1.3b-speedcontrol-v1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..41a67edfea24e40caf7fefbc013ae6517c2f7100
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-FLF2V-14B-720P.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-FLF2V-14B-720P_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ end_image=video[80],
+ seed=0, tiled=True,
+ sigma_shift=16,
+)
+save_video(video, "video_Wan2.1-FLF2V-14B-720P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..6726e9c3a71fabedd15a58844c103919af06a555
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-Control.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-1.3B-Control_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-1.3B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e1e6f3cac2ebe7b22008e3c797dd7b75fe5d5fa
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-1.3B-InP.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-1.3B-InP_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-1.3B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..08b0acb5a7e209dae1e0b096a113398a1b6faf26
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-Control.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-14B-Control_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-14B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7e39d7e8dbcbbb82eca12e6441bc22c2ed36f85
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-14B-InP.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-14B-InP_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-14B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5a210a59039c085fdc602bae6c2f600af10ac22
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-1.3B-Control-Camera_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# Camera-controlled image-to-video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ camera_control_direction="Left", camera_control_speed=0.0,
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control-Camera.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..6497e1b875d2a6186777594b895dae46bbfe6924
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-Control.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-1.3B-Control_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video, reference_image=reference_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd8ee204e6d79b522523262b75cf3290b55176ad
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-1.3B-InP.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-1.3B-InP_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..2de51028922c429625d20d2d0c9b09cb3473a673
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control-Camera.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-14B-Control-Camera_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# Camera-controlled image-to-video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ camera_control_direction="Left", camera_control_speed=0.0,
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control-Camera.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dd2516e9c6588e0161dc6e36b44e623f3745b75
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-Control.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-14B-Control_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video, reference_image=reference_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e944b064ab1d732bcdcd24894a3e4138f4a8282
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-Fun-V1.1-14B-InP.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-Fun-V1.1-14B-InP_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1c8615aac171b81a702d7e1826ef48948d74d14
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-480P.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-I2V-14B-480P_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-I2V-14B-480P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9d39c88047b68ebe10f39fc548df3810bc13ae7
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-I2V-14B-720P.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-I2V-14B-720P_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=720, width=1280)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ height=720, width=1280, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-I2V-14B-720P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..1420514e0ed5ee01fc7a144708c70ee1910cd459
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-1.3B.py
@@ -0,0 +1,25 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-T2V-1.3B_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-T2V-1.3B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0107aef39e38565c2457562343124e0e45440d0
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-T2V-14B.py
@@ -0,0 +1,25 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-T2V-14B_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-T2V-14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..a916745155be47e0ce0ff7ec41f1dc1220c8e23b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B-Preview.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-VACE-1.3B-Preview_full/epoch-1.safetensors")
+pipe.vace.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(49)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-1.3B-Preview.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a371e7456af056f6afdf7c1dd8aefb2082f77cf
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-1.3B.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-VACE-1.3B_full/epoch-1.safetensors")
+pipe.vace.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(49)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-1.3B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..5553471bec03e201f8d896b06ab7019ab488e408
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.1-VACE-14B.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.1-VACE-14B_full/epoch-1.safetensors")
+pipe.vace.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(17)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=17,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-I2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-I2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f6d253cfb6612061a4954a1eeeb9b3af70b1726
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-I2V-A14B.py
@@ -0,0 +1,33 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
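+# Wan2.2-A14B has two denoising experts: load the trained high-noise weights into pipe.dit and the low-noise weights into pipe.dit2.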
+state_dict = load_state_dict("models/train/Wan2.2-I2V-A14B_high_noise_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+state_dict = load_state_dict("models/train/Wan2.2-I2V-A14B_low_noise_full/epoch-1.safetensors")
+pipe.dit2.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ num_frames=49,
+ seed=1, tiled=True,
+)
+save_video(video, "video_Wan2.2-I2V-A14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-T2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-T2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..be0e000e7da227afd641e8e395495d4d4eff849d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-T2V-A14B.py
@@ -0,0 +1,28 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
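+# Load both trained experts: high-noise checkpoint into pipe.dit, low-noise checkpoint into pipe.dit2.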
+state_dict = load_state_dict("models/train/Wan2.2-T2V-A14B_high_noise_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+state_dict = load_state_dict("models/train/Wan2.2-T2V-A14B_low_noise_full/epoch-1.safetensors")
+pipe.dit2.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.2-T2V-A14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-TI2V-5B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-TI2V-5B.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a3d36cdb68c8a25cca04dce8c6975e85eb7974b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/Wan2.2-TI2V-5B.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="Wan2.2_VAE.pth", offload_device="cpu"),
+ ],
+)
+state_dict = load_state_dict("models/train/Wan2.2-TI2V-5B_full/epoch-1.safetensors")
+pipe.dit.load_state_dict(state_dict)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ num_frames=49,
+ seed=1, tiled=True,
+)
+save_video(video, "video_Wan2.2-TI2V-5B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/run_test.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/run_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4e32039a6e80c7f18c1fe8a6b7e4fdb84ae85d4
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_full/run_test.py
@@ -0,0 +1,25 @@
+import multiprocessing, os
+
+
+def run_task(scripts, thread_id, thread_num):
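+    # Round-robin scheduling: this worker runs every script whose index maps to its id
+    # (script_id % thread_num == thread_id), pinned to one GPU via CUDA_VISIBLE_DEVICES.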
+ for script_id, script in enumerate(scripts):
+ if script_id % thread_num == thread_id:
+ log_file_name = script.replace("/", "_") + ".txt"
+ cmd = f"CUDA_VISIBLE_DEVICES={thread_id} python -u {script} > data/log/{log_file_name} 2>&1"
+ os.makedirs("data/log", exist_ok=True)
+ print(cmd, flush=True)
+ os.system(cmd)
+
+
+if __name__ == "__main__":
+ scripts = []
+ for file_name in os.listdir("examples/wanvideo/model_training/validate_full"):
+ if file_name != "run_test.py":
+ scripts.append(os.path.join("examples/wanvideo/model_training/validate_full", file_name))
+
+ processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)]
+ for p in processes:
+ p.start()
+ for p in processes:
+ p.join()
+ print("Done!")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..167b8714bf01c83bc337d87fb4aaff356685b637
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-1.3b-speedcontrol-v1.py
@@ -0,0 +1,27 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="DiffSynth-Studio/Wan2.1-1.3b-speedcontrol-v1", origin_file_pattern="model.safetensors", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-1.3b-speedcontrol-v1_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+# Text-to-video
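+# motion_bucket_id conditions the speed-control adapter loaded above; larger values are assumed to yield faster motion.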
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True,
+ motion_bucket_id=50
+)
+save_video(video, "video_Wan2.1-1.3b-speedcontrol-v1.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd68f0eca7bcf58402ec0ebebcb2c4f69cf053c3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-FLF2V-14B-720P.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-FLF2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-FLF2V-14B-720P_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ end_image=video[80],
+ seed=0, tiled=True,
+ sigma_shift=16,
+)
+save_video(video, "video_Wan2.1-FLF2V-14B-720P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..7270c38e75640d2a6af0f97c67e03cbdc5cec83e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-Control.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-1.3B-Control_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-1.3B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..c904dfa4d01b537702092409fc73fddcb3fd6342
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-1.3B-InP.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-1.3B-InP_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-1.3B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..8631d054434554469a62d98dfa1eaaa208339773
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-Control.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-14B-Control_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-14B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..e020aacbe8c15e46f7a5dd21d3a79df0fb5f2e65
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-14B-InP.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-14B-InP_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-14B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..3023692448034848795a6b847a51ba9d6fc04d2d
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control-Camera.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-1.3B-Control-Camera_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# Image-to-video with camera control
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ camera_control_direction="Left", camera_control_speed=0.0,
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control-Camera.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebcfd2f85d6625d66aaa7a49e439f3788eaba516
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-Control.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-1.3B-Control_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video, reference_image=reference_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..99eb2b48a4aacd52cb1361ce7f3d5d689d5902e0
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-1.3B-InP.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-1.3B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-1.3B-InP_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-1.3B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..0edb2d0224f97bf385f16bb81a62d77e6bf7807e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control-Camera.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control-Camera", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-1.3B-Control-Camera_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# Image-to-video with camera control
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0],
+ camera_control_direction="Left", camera_control_speed=0.0,
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control-Camera.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b1109826b6583f637ceaf38dac3d02034815107
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-Control.py
@@ -0,0 +1,32 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-Control", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-14B-Control_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(81)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+# Control video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ control_video=video, reference_image=reference_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-Control.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py
new file mode 100644
index 0000000000000000000000000000000000000000..35088fb7b28885d861e30b18f052827dd97ffae8
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-Fun-V1.1-14B-InP.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="PAI/Wan2.1-Fun-V1.1-14B-InP", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-Fun-V1.1-14B-InP_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)
+
+# First and last frame to video
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=video[0], end_image=video[80],
+ seed=0, tiled=True
+)
+save_video(video, "video_Wan2.1-Fun-V1.1-14B-InP.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py
new file mode 100644
index 0000000000000000000000000000000000000000..1687e36b1873c06aadfed3bb29076ffe365b9f4e
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-480P.py
@@ -0,0 +1,29 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-480P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-I2V-14B-480P_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-I2V-14B-480P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd60f378329b71c3b26b36e70b254187c5c2e9cb
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-I2V-14B-720P.py
@@ -0,0 +1,30 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-I2V-14B-720P", origin_file_pattern="models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-I2V-14B-720P_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=720, width=1280)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ height=720, width=1280, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-I2V-14B-720P.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cb6c020a7966853ee586e638af6522913d28628
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-1.3B.py
@@ -0,0 +1,24 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-T2V-1.3B_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-T2V-1.3B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b66a4996c273755b4172d7a1e39ac89a6938141
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-T2V-14B.py
@@ -0,0 +1,24 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-T2V-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.1-T2V-14B_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-T2V-14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..91cbf9271c23a2e6ae1d4aac8d1e081915a7e6bc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B-Preview.py
@@ -0,0 +1,29 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="iic/VACE-Wan2.1-1.3B-Preview", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.vace, "models/train/Wan2.1-VACE-1.3B-Preview_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(49)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-1.3B-Preview.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fd2037a5f9efefe60fe65b21bbf6fdb01cf632
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-1.3B.py
@@ -0,0 +1,29 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-1.3B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.vace, "models/train/Wan2.1-VACE-1.3B_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(49)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-1.3B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..bec5df394b2297e885ad65ac9d903af74655eafc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.1-VACE-14B.py
@@ -0,0 +1,29 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.1-VACE-14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.vace, "models/train/Wan2.1-VACE-14B_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = VideoData("data/example_video_dataset/video1_softedge.mp4", height=480, width=832)
+video = [video[i] for i in range(17)]
+reference_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ vace_video=video, vace_reference_image=reference_image, num_frames=17,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.1-VACE-14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-I2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-I2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..f221ef71383cb03f53c45ac948805b924fec8664
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-I2V-A14B.py
@@ -0,0 +1,31 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-I2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
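+# Apply the trained LoRAs to both experts: the high-noise LoRA on pipe.dit and the low-noise LoRA on pipe.dit2.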
+pipe.load_lora(pipe.dit, "models/train/Wan2.2-I2V-A14B_high_noise_lora/epoch-4.safetensors", alpha=1)
+pipe.load_lora(pipe.dit2, "models/train/Wan2.2-I2V-A14B_low_noise_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ num_frames=49,
+ seed=1, tiled=True,
+)
+save_video(video, "video_Wan2.2-I2V-A14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-T2V-A14B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-T2V-A14B.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab439274bb9307da38df841edacff920a0d48bb3
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-T2V-A14B.py
@@ -0,0 +1,28 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="high_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="low_noise_model/diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-T2V-A14B", origin_file_pattern="Wan2.1_VAE.pth", offload_device="cpu"),
+ ],
+)
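+# Apply the high-noise LoRA to pipe.dit and the low-noise LoRA to pipe.dit2.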
+pipe.load_lora(pipe.dit, "models/train/Wan2.2-T2V-A14B_high_noise_lora/epoch-4.safetensors", alpha=1)
+pipe.load_lora(pipe.dit2, "models/train/Wan2.2-T2V-A14B_low_noise_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ num_frames=49,
+ seed=1, tiled=True
+)
+save_video(video, "video_Wan2.2-T2V-A14B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-TI2V-5B.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-TI2V-5B.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5b16c8d19917ce487557297621a3a01a0925305
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/Wan2.2-TI2V-5B.py
@@ -0,0 +1,29 @@
+import torch
+from PIL import Image
+from diffsynth import save_video, VideoData, load_state_dict
+from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
+from modelscope import dataset_snapshot_download
+
+
+pipe = WanVideoPipeline.from_pretrained(
+ torch_dtype=torch.bfloat16,
+ device="cuda",
+ model_configs=[
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="models_t5_umt5-xxl-enc-bf16.pth", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="diffusion_pytorch_model*.safetensors", offload_device="cpu"),
+ ModelConfig(model_id="Wan-AI/Wan2.2-TI2V-5B", origin_file_pattern="Wan2.2_VAE.pth", offload_device="cpu"),
+ ],
+)
+pipe.load_lora(pipe.dit, "models/train/Wan2.2-TI2V-5B_lora/epoch-4.safetensors", alpha=1)
+pipe.enable_vram_management()
+
+input_image = VideoData("data/example_video_dataset/video1.mp4", height=480, width=832)[0]
+
+video = pipe(
+ prompt="from sunset to night, a small town, light, house, river",
+ negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
+ input_image=input_image,
+ num_frames=49,
+ seed=1, tiled=True,
+)
+save_video(video, "video_Wan2.2-TI2V-5B.mp4", fps=15, quality=5)
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/run_test.py b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/run_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..367ee9d3c6afb4f781ed742a75a3e2a2a187564b
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/examples/wanvideo/model_training/validate_lora/run_test.py
@@ -0,0 +1,25 @@
+import multiprocessing, os
+
+
+def run_task(scripts, thread_id, thread_num):
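+    # Round-robin scheduling: this worker runs every script whose index maps to its id
+    # (script_id % thread_num == thread_id), pinned to one GPU via CUDA_VISIBLE_DEVICES.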
+ for script_id, script in enumerate(scripts):
+ if script_id % thread_num == thread_id:
+ log_file_name = script.replace("/", "_") + ".txt"
+ cmd = f"CUDA_VISIBLE_DEVICES={thread_id} python -u {script} > data/log/{log_file_name} 2>&1"
+ os.makedirs("data/log", exist_ok=True)
+ print(cmd, flush=True)
+ os.system(cmd)
+
+
+if __name__ == "__main__":
+ scripts = []
+ for file_name in os.listdir("examples/wanvideo/model_training/validate_lora"):
+ if file_name != "run_test.py":
+ scripts.append(os.path.join("examples/wanvideo/model_training/validate_lora", file_name))
+
+ processes = [multiprocessing.Process(target=run_task, args=(scripts, i, 8)) for i in range(8)]
+ for p in processes:
+ p.start()
+ for p in processes:
+ p.join()
+ print("Done!")
\ No newline at end of file
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/AnimateDiff/Put AnimateDiff ckpt files here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/AnimateDiff/Put AnimateDiff ckpt files here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/Annotators/Put ControlNet annotators here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/Annotators/Put ControlNet annotators here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/BeautifulPrompt/Put BeautifulPrompt models here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/BeautifulPrompt/Put BeautifulPrompt models here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/ControlNet/Put ControlNet pth files here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/ControlNet/Put ControlNet pth files here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/FLUX/Put Stable Diffusion checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/FLUX/Put Stable Diffusion checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/HunyuanDiT/Put Hunyuan DiT checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/HunyuanDiT/Put Hunyuan DiT checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/IpAdapter/Put IP-Adapter checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/IpAdapter/Put IP-Adapter checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/RIFE/Put RIFE models here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/RIFE/Put RIFE models here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/kolors/Put Kolors checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/kolors/Put Kolors checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/exp_code/1_benchmark/DiffSynth-Studio/models/kolors/Put Kolors checkpoints here.txt
@@ -0,0 +1 @@
+
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/lora/Put lora files here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/lora/Put lora files here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion/Put Stable Diffusion checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion/Put Stable Diffusion checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_3/Put Stable Diffusion 3 checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_3/Put Stable Diffusion 3 checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_xl/Put Stable Diffusion XL checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_xl/Put Stable Diffusion XL checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_xl_turbo/Put Stable Diffusion XL Turbo checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/stable_diffusion_xl_turbo/Put Stable Diffusion XL Turbo checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/stable_video_diffusion/Put Stable Video Diffusion checkpoints here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/stable_video_diffusion/Put Stable Video Diffusion checkpoints here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/textual_inversion/Put Textual Inversion files here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/textual_inversion/Put Textual Inversion files here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/DiffSynth-Studio/models/translator/Put translator models here.txt b/exp_code/1_benchmark/DiffSynth-Studio/models/translator/Put translator models here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/exp_code/1_benchmark/Flux-dev/infer.py b/exp_code/1_benchmark/Flux-dev/infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..836df2f83432491802a1393181a760e97319ab30
--- /dev/null
+++ b/exp_code/1_benchmark/Flux-dev/infer.py
@@ -0,0 +1,17 @@
+import torch
+from diffusers import FluxPipeline
+
+pipe = FluxPipeline.from_pretrained("/mnt/workspace/checkpoints/black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+pipe.enable_model_cpu_offload()  # Save some VRAM by offloading the model to the CPU. Remove this if you have enough GPU memory.
+
+prompt = "A cat holding a sign that says hello world"
+image = pipe(
+ prompt,
+ height=384, # candidate heights: 384, 192, 96
+ width=640, # candidate widths: 640, 320, 160
+ guidance_scale=3.5,
+ num_inference_steps=50,
+ max_sequence_length=512,
+ generator=torch.Generator("cpu").manual_seed(0)
+).images[0]
+image.save("flux-dev.png")
diff --git a/exp_code/1_benchmark/FramePack/.gitignore b/exp_code/1_benchmark/FramePack/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e83dd2c013be96aa73adfbe5917bc4a610d27cc0
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/.gitignore
@@ -0,0 +1,178 @@
+hf_download/
+outputs/
+repo/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
diff --git a/exp_code/1_benchmark/FramePack/LICENSE b/exp_code/1_benchmark/FramePack/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/exp_code/1_benchmark/FramePack/README.md b/exp_code/1_benchmark/FramePack/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..310d45454da8bc1dbd65055059500a6fcad43673
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/README.md
@@ -0,0 +1,487 @@
+
+
+
+
+# FramePack
+
+Official implementation and desktop software for ["Packing Input Frame Context in Next-Frame Prediction Models for Video Generation"](https://lllyasviel.github.io/frame_pack_gitpage/).
+
+Links: [**Paper**](https://arxiv.org/abs/2504.12626), [**Project Page**](https://lllyasviel.github.io/frame_pack_gitpage/)
+
+FramePack is a next-frame (next-frame-section) prediction neural network structure that generates videos progressively.
+
+FramePack compresses input contexts to a constant length so that the generation workload is invariant to video length.
+
+FramePack can process a very large number of frames with 13B models even on laptop GPUs.
+
+FramePack can be trained with a much larger batch size, similar to the batch size for image diffusion training.
+
+**Video diffusion, but feels like image diffusion.**
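+
+To make the "constant length" claim concrete, here is a toy sketch. It is illustrative only and not the actual packing schedule from the paper; `packed_context_length` and `tokens_per_frame` are made-up names and numbers. The idea: if each older frame keeps roughly half the token budget of the frame after it, the total context length is bounded by a geometric series and stays nearly constant however long the history grows.
+
+    # Toy illustration only; the real packing logic lives in the model code.
+    def packed_context_length(num_history_frames, tokens_per_frame=1536):
+        total = 0
+        for age in range(num_history_frames):
+            total += tokens_per_frame >> age  # halve the token budget per step of age
+        return total
+
+    for n in (1, 10, 100, 1000):
+        print(n, packed_context_length(n))  # converges just below 2 * tokens_per_frame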
+
+# News
+
+**2025 July 14:** Some pure text2video anti-drifting stress-test results of FramePack-P1 are uploaded [here,](https://lllyasviel.github.io/frame_pack_gitpage/p1/#text-to-video-stress-tests) using common prompts without any reference images.
+
+**2025 June 26:** Some results of FramePack-P1 are uploaded [here.](https://lllyasviel.github.io/frame_pack_gitpage/p1) The FramePack-P1 will be the next version of FramePack with two designs: Planned Anti-Drifting and History Discretization.
+
+**2025 May 03:** The FramePack-F1 is released. [Try it here.](https://github.com/lllyasviel/FramePack/discussions/459)
+
+Note that this GitHub repository is the only official FramePack website. We do not have any web services. All other websites are spam and fake, including but not limited to `framepack.co`, `frame_pack.co`, `framepack.net`, `frame_pack.net`, `framepack.ai`, `frame_pack.ai`, `framepack.pro`, `frame_pack.pro`, `framepack.cc`, `frame_pack.cc`,`framepackai.co`, `frame_pack_ai.co`, `framepackai.net`, `frame_pack_ai.net`, `framepackai.pro`, `frame_pack_ai.pro`, `framepackai.cc`, `frame_pack_ai.cc`, and so on. Again, they are all spam and fake. **Do not pay money or download files from any of those websites.**
+
+# Requirements
+
+Note that this repo is functional desktop software with a minimal, standalone, high-quality sampling system and memory management.
+
+**Start with this repo before you try anything else!**
+
+Requirements:
+
+* Nvidia GPU in the RTX 30XX, 40XX, or 50XX series that supports fp16 and bf16. The GTX 10XX/20XX series are not tested.
+* Linux or Windows operating system.
+* At least 6GB GPU memory.
+
+To generate a 1-minute video (60 seconds) at 30fps (1800 frames) using a 13B model, the minimal required GPU memory is 6GB. (Yes, 6 GB, not a typo. Laptop GPUs are okay.)
+
+About speed, on my RTX 4090 desktop it generates at a speed of 2.5 seconds/frame (unoptimized) or 1.5 seconds/frame (teacache). On my laptops like 3070ti laptop or 3060 laptop, it is about 4x to 8x slower. [Troubleshoot if your speed is much slower than this.](https://github.com/lllyasviel/FramePack/issues/151#issuecomment-2817054649)
+
+In any case, you will directly see the generated frames since it is next-frame(-section) prediction. So you will get lots of visual feedback before the entire video is generated.
+
+# Installation
+
+**Windows**:
+
+[>>> Click Here to Download One-Click Package (CUDA 12.6 + Pytorch 2.6) <<<](https://github.com/lllyasviel/FramePack/releases/download/windows/framepack_cu126_torch26.7z)
+
+After downloading, uncompress the archive, use `update.bat` to update, and then use `run.bat` to run.
+
+Note that running `update.bat` is important; otherwise you may be using a previous version with unfixed bugs.
+
+
+
+Note that the models will be downloaded automatically. You will download more than 30GB from HuggingFace.
+
+**Linux**:
+
+We recommend an independent Python 3.10 environment.
+
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
+ pip install -r requirements.txt
+
+To start the GUI, run:
+
+ python demo_gradio.py
+
+Note that it supports `--share`, `--port`, `--server`, and so on.
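+
+For example (the flags below come from the script's own argparse; adjust as needed):
+
+    python demo_gradio.py --server 127.0.0.1 --port 7860 --inbrowser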
+
+The software supports PyTorch attention, xformers, flash-attn, and sage-attention. By default, it will just use PyTorch attention. You can install those attention kernels if you know how.
+
+For example, to install sage-attention (linux):
+
+ pip install sageattention==1.0.6
+
+However, we highly recommend first trying without sage-attention, since it influences the results, though the influence is minimal.
+
+# GUI
+
+
+
+On the left you upload an image and write a prompt.
+
+On the right are the generated videos and latent previews.
+
+Because this is a next-frame-section prediction model, the video will be generated longer and longer as more sections are completed.
+
+You will see the progress bar for each section and the latent preview for the next section.
+
+Note that the initial progress may be slower than later diffusion as the device may need some warmup.
+
+# Sanity Check
+
+Before trying your own inputs, we highly recommend going through the sanity check to find out whether anything is wrong with your hardware or software.
+
+Next-frame-section prediction models are very sensitive to subtle differences in noise and hardware. Usually, people will get slightly different results on different devices, but the results should look similar overall. In some cases, you may even get exactly the same results.
+
+## Image-to-5-seconds
+
+Download this image:
+
+
+
+Copy this prompt:
+
+`The man dances energetically, leaping mid-air with fluid arm swings and quick footwork.`
+
+Set like this:
+
+(all default parameters, with teacache turned off)
+
+
+The result will be:
+
+*(result video; may be compressed by GitHub)*
+
+**Important Note:**
+
+Again, this is a next-frame-section prediction model. This means you will generate videos frame-by-frame or section-by-section.
+
+**If you get a much shorter video in the UI, like a video with only 1 second, then it is totally expected.** You just need to wait. More sections will be generated to complete the video.
+
+## Know the influence of TeaCache and Quantization
+
+Download this image:
+
+
+
+Copy this prompt:
+
+`The girl dances gracefully, with clear movements, full of charm.`
+
+Set like this:
+
+
+
+Turn off teacache:
+
+
+
+You will get this:
+
+*(result video with teacache off; may be compressed by GitHub)*
+
+Now turn on teacache:
+
+
+
+About 30% of users will get this (the other 70% will get different random results depending on their hardware):
+
+*(result video with teacache on: a typical worse result)*
+
+So you can see that teacache is not really lossless and sometimes can influence the result a lot.
+
+We recommend using teacache to try ideas and then using the full diffusion process to get high-quality results.
+
+This recommendation also applies to sage-attention, bnb quantization, gguf, and so on.
+
+## Image-to-1-minute
+
+
+
+`The girl dances gracefully, with clear movements, full of charm.`
+
+
+
+Set video length to 60 seconds:
+
+
+
+If everything is in order, you will eventually get a result like this.
+
+60s version:
+
+*(60-second result video; may be compressed by GitHub)*
+
+6s version:
+
+*(6-second result video; may be compressed by GitHub)*
+
+# More Examples
+
+Many more examples are in [**Project Page**](https://lllyasviel.github.io/frame_pack_gitpage/).
+
+Below are some more examples that you may be interested in reproducing.
+
+---
+
+
+
+`The girl dances gracefully, with clear movements, full of charm.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The girl suddenly took out a sign that said “cute” using right hand`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The girl skateboarding, repeating the endless spinning and dancing and jumping on a skateboard, with clear movements, full of charm.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The girl dances gracefully, with clear movements, full of charm.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The man dances flamboyantly, swinging his hips and striking bold poses with dramatic flair.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The woman dances elegantly among the blossoms, spinning slowly with flowing sleeves and graceful hand movements.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+
+
+`The young man writes intensely, flipping papers and adjusting his glasses with swift, focused movements.`
+
+*(example video; may be compressed by GitHub)*
+
+---
+
+# Prompting Guideline
+
+Many people would ask how to write better prompts.
+
+Below is a ChatGPT template that I personally often use to get prompts:
+
+ You are an assistant that writes short, motion-focused prompts for animating images.
+
+ When the user sends an image, respond with a single, concise prompt describing visual motion (such as human activity, moving objects, or camera movements). Focus only on how the scene could come alive and become dynamic using brief phrases.
+
+ Larger and more dynamic motions (like dancing, jumping, running, etc.) are preferred over smaller or more subtle ones (like standing still, sitting, etc.).
+
+ Describe subject, then motion, then other things. For example: "The girl dances gracefully, with clear movements, full of charm."
+
+ If there is something that can dance (like a man, girl, robot, etc.), then prefer to describe it as dancing.
+
+ Stay in a loop: one image in, one motion prompt out. Do not explain, ask questions, or generate multiple options.
+
+Paste this instruction into ChatGPT and then feed it an image to get a prompt like this:
+
+
+
+*The man dances powerfully, striking sharp poses and gliding smoothly across the reflective floor.*
+
+Usually this will give you a prompt that works well.
+
+You can also write prompts yourself. Concise prompts are usually preferred, for example:
+
+*The girl dances gracefully, with clear movements, full of charm.*
+
+*The man dances powerfully, with clear movements, full of energy.*
+
+and so on.
+
+# Cite
+
+ @article{zhang2025framepack,
+ title={Packing Input Frame Contexts in Next-Frame Prediction Models for Video Generation},
+ author={Lvmin Zhang and Maneesh Agrawala},
+ journal={Arxiv},
+ year={2025}
+ }
diff --git a/exp_code/1_benchmark/FramePack/demo_gradio.py b/exp_code/1_benchmark/FramePack/demo_gradio.py
new file mode 100644
index 0000000000000000000000000000000000000000..c59ed58a3430ebd64787523a6bbbee421f380cde
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/demo_gradio.py
@@ -0,0 +1,409 @@
+from diffusers_helper.hf_login import login
+
+import os
+
+os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
+
+import gradio as gr
+import torch
+import traceback
+import einops
+import safetensors.torch as sf
+import numpy as np
+import argparse
+import math
+
+from PIL import Image
+from diffusers import AutoencoderKLHunyuanVideo
+from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
+from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
+from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
+from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
+from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
+from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
+from diffusers_helper.thread_utils import AsyncStream, async_run
+from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
+from transformers import SiglipImageProcessor, SiglipVisionModel
+from diffusers_helper.clip_vision import hf_clip_vision_encode
+from diffusers_helper.bucket_tools import find_nearest_bucket
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--share', action='store_true')
+parser.add_argument("--server", type=str, default='0.0.0.0')
+parser.add_argument("--port", type=int, required=False)
+parser.add_argument("--inbrowser", action='store_true')
+args = parser.parse_args()
+
+# for win desktop probably use --server 127.0.0.1 --inbrowser
+# For linux server probably use --server 127.0.0.1 or do not use any cmd flags
+
+print(args)
+
+free_mem_gb = get_cuda_free_memory_gb(gpu)
+high_vram = free_mem_gb > 60
+
+print(f'Free VRAM {free_mem_gb} GB')
+print(f'High-VRAM Mode: {high_vram}')
+
+text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
+text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
+tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
+tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
+vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
+
+feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
+image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
+
+transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePackI2V_HY', torch_dtype=torch.bfloat16).cpu()
+
+vae.eval()
+text_encoder.eval()
+text_encoder_2.eval()
+image_encoder.eval()
+transformer.eval()
+
+if not high_vram:
+ vae.enable_slicing()
+ vae.enable_tiling()
+
+transformer.high_quality_fp32_output_for_inference = True
+print('transformer.high_quality_fp32_output_for_inference = True')
+
+transformer.to(dtype=torch.bfloat16)
+vae.to(dtype=torch.float16)
+image_encoder.to(dtype=torch.float16)
+text_encoder.to(dtype=torch.float16)
+text_encoder_2.to(dtype=torch.float16)
+
+vae.requires_grad_(False)
+text_encoder.requires_grad_(False)
+text_encoder_2.requires_grad_(False)
+image_encoder.requires_grad_(False)
+transformer.requires_grad_(False)
+
+if not high_vram:
+ # DynamicSwapInstaller works like huggingface's enable_sequential_cpu_offload but is roughly 3x faster
+ DynamicSwapInstaller.install_model(transformer, device=gpu)
+ DynamicSwapInstaller.install_model(text_encoder, device=gpu)
+else:
+ text_encoder.to(gpu)
+ text_encoder_2.to(gpu)
+ image_encoder.to(gpu)
+ vae.to(gpu)
+ transformer.to(gpu)
+
+stream = AsyncStream()
+
+outputs_folder = './outputs/'
+os.makedirs(outputs_folder, exist_ok=True)
+
+
+@torch.no_grad()
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+ total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
+ total_latent_sections = int(max(round(total_latent_sections), 1))
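+ # Output is 30 fps and each sampled section covers roughly latent_window_size * 4 frames,
+ # so e.g. the default 5 s with latent_window_size=9 gives 150 / 36 ≈ 4.2, i.e. 4 sections.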
+
+ job_id = generate_timestamp()
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
+
+ try:
+ # Clean GPU
+ if not high_vram:
+ unload_complete_models(
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
+ )
+
+ # Text encoding
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
+
+ if not high_vram:
+ fake_diffusers_current_device(text_encoder, gpu) # we only encode one prompt here, so offloading costs the same as a plain device move: one load and one encode either way.
+ load_model_as_complete(text_encoder_2, target_device=gpu)
+
+ llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
+
+ if cfg == 1:
+ llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
+ else:
+ llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
+
+ llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
+ llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
+
+ # Processing input image
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
+
+ H, W, C = input_image.shape
+ height, width = find_nearest_bucket(H, W, resolution=640)
+ input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
+
+ Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
+
+ input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
+ input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
+
+ # VAE encoding
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
+
+ if not high_vram:
+ load_model_as_complete(vae, target_device=gpu)
+
+ start_latent = vae_encode(input_image_pt, vae)
+
+ # CLIP Vision
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
+
+ if not high_vram:
+ load_model_as_complete(image_encoder, target_device=gpu)
+
+ image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
+ image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
+
+ # Dtype
+
+ llama_vec = llama_vec.to(transformer.dtype)
+ llama_vec_n = llama_vec_n.to(transformer.dtype)
+ clip_l_pooler = clip_l_pooler.to(transformer.dtype)
+ clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
+ image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
+
+ # Sampling
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
+
+ rnd = torch.Generator("cpu").manual_seed(seed)
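+ # A window of N latent frames decodes to 4*N - 3 pixel frames (the video VAE is causal with 4x temporal compression).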
+ num_frames = latent_window_size * 4 - 3
+
+ history_latents = torch.zeros(size=(1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32).cpu()
+ history_pixels = None
+ total_generated_latent_frames = 0
+
+ latent_paddings = reversed(range(total_latent_sections))
+
+ if total_latent_sections > 4:
+ # In theory the latent_paddings should follow the above sequence, but it seems that duplicating some
+ # items looks better than expanding it when total_latent_sections > 4
+ # One can try to remove below trick and just
+ # use `latent_paddings = list(reversed(range(total_latent_sections)))` to compare
+ latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
+
+ for latent_padding in latent_paddings:
+ is_last_section = latent_padding == 0
+ latent_padding_size = latent_padding * latent_window_size
+
+ if stream.input_queue.top() == 'end':
+ stream.output_queue.push(('end', None))
+ return
+
+ print(f'latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}')
+
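+ # Time-axis index layout for this section (matches the split sizes below):
+ # [1 clean pre-frame | latent_padding_size blanks | latent_window_size new latents | 1 clean post-frame | 2 clean (2x) | 16 clean (4x)]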
+ indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0)
+ clean_latent_indices_pre, blank_indices, latent_indices, clean_latent_indices_post, clean_latent_2x_indices, clean_latent_4x_indices = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1)
+ clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1)
+
+ clean_latents_pre = start_latent.to(history_latents)
+ clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, :1 + 2 + 16, :, :].split([1, 2, 16], dim=2)
+ clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2)
+
+ if not high_vram:
+ unload_complete_models()
+ move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
+
+ if use_teacache:
+ transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
+ else:
+ transformer.initialize_teacache(enable_teacache=False)
+
+ def callback(d):
+ preview = d['denoised']
+ preview = vae_decode_fake(preview)
+
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
+
+ if stream.input_queue.top() == 'end':
+ stream.output_queue.push(('end', None))
+ raise KeyboardInterrupt('User ends the task.')
+
+ current_step = d['i'] + 1
+ percentage = int(100.0 * current_step / steps)
+ hint = f'Sampling {current_step}/{steps}'
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
+ stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
+ return
+
+ generated_latents = sample_hunyuan(
+ transformer=transformer,
+ sampler='unipc',
+ width=width,
+ height=height,
+ frames=num_frames,
+ real_guidance_scale=cfg,
+ distilled_guidance_scale=gs,
+ guidance_rescale=rs,
+ # shift=3.0,
+ num_inference_steps=steps,
+ generator=rnd,
+ prompt_embeds=llama_vec,
+ prompt_embeds_mask=llama_attention_mask,
+ prompt_poolers=clip_l_pooler,
+ negative_prompt_embeds=llama_vec_n,
+ negative_prompt_embeds_mask=llama_attention_mask_n,
+ negative_prompt_poolers=clip_l_pooler_n,
+ device=gpu,
+ dtype=torch.bfloat16,
+ image_embeddings=image_encoder_last_hidden_state,
+ latent_indices=latent_indices,
+ clean_latents=clean_latents,
+ clean_latent_indices=clean_latent_indices,
+ clean_latents_2x=clean_latents_2x,
+ clean_latent_2x_indices=clean_latent_2x_indices,
+ clean_latents_4x=clean_latents_4x,
+ clean_latent_4x_indices=clean_latent_4x_indices,
+ callback=callback,
+ )
+
+ if is_last_section:
+ generated_latents = torch.cat([start_latent.to(generated_latents), generated_latents], dim=2)
+
+ total_generated_latent_frames += int(generated_latents.shape[2])
+ history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2)
+
+ if not high_vram:
+ offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
+ load_model_as_complete(vae, target_device=gpu)
+
+ real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :]
+
+ if history_pixels is None:
+ history_pixels = vae_decode(real_history_latents, vae).cpu()
+ else:
+ section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2)
+ overlapped_frames = latent_window_size * 4 - 3
+
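+ # New sections are prepended to history_latents, so decode only the newest
+ # section here and soft-append it onto the already-decoded pixels over the
+ # overlapping frames to smooth the boundary.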
+ current_pixels = vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu()
+ history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames)
+
+ if not high_vram:
+ unload_complete_models()
+
+ output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
+
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
+
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
+
+ stream.output_queue.push(('file', output_filename))
+
+ if is_last_section:
+ break
+ except:
+ traceback.print_exc()
+
+ if not high_vram:
+ unload_complete_models(
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
+ )
+
+ stream.output_queue.push(('end', None))
+ return
+
+
+def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+ global stream
+ assert input_image is not None, 'No input image!'
+
+ yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
+
+ stream = AsyncStream()
+
+ async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
+
+ output_filename = None
+
+ while True:
+ flag, data = stream.output_queue.next()
+
+ if flag == 'file':
+ output_filename = data
+ yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
+
+ if flag == 'progress':
+ preview, desc, html = data
+ yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
+
+ if flag == 'end':
+ yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
+ break
+
+
+def end_process():
+ stream.input_queue.push('end')
+
+
+quick_prompts = [
+ 'The girl dances gracefully, with clear movements, full of charm.',
+ 'A character doing some simple body movements.',
+]
+quick_prompts = [[x] for x in quick_prompts]
+
+
+css = make_progress_bar_css()
+block = gr.Blocks(css=css).queue()
+with block:
+ gr.Markdown('# FramePack')
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
+ prompt = gr.Textbox(label="Prompt", value='')
+ example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
+ example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
+
+ with gr.Row():
+ start_button = gr.Button(value="Start Generation")
+ end_button = gr.Button(value="End Generation", interactive=False)
+
+ with gr.Group():
+ use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
+
+ n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False) # Not used
+ seed = gr.Number(label="Seed", value=31337, precision=0)
+
+ total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
+ latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False) # Should not change
+ steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')
+
+ cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False) # Should not change
+ gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
+ rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
+
+ gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
+
+ mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
+
+ with gr.Column():
+ preview_image = gr.Image(label="Next Latents", height=200, visible=False)
+ result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
+ gr.Markdown('Note that the ending actions will be generated before the starting actions due to the inverted sampling. If the starting action is not in the video, you just need to wait, and it will be generated later.')
+ progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
+ progress_bar = gr.HTML('', elem_classes='no-generating-animation')
+
+ gr.HTML('')
+
+ ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
+ start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
+ end_button.click(fn=end_process)
+
+
+block.launch(
+ server_name=args.server,
+ server_port=args.port,
+ share=args.share,
+ inbrowser=args.inbrowser,
+)
diff --git a/exp_code/1_benchmark/FramePack/demo_gradio_f1.py b/exp_code/1_benchmark/FramePack/demo_gradio_f1.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb9133de2e0c178b65eeaa9cb42cbfab29a29137
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/demo_gradio_f1.py
@@ -0,0 +1,390 @@
+from diffusers_helper.hf_login import login
+
+import os
+
+os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
+
+import gradio as gr
+import torch
+import traceback
+import einops
+import safetensors.torch as sf
+import numpy as np
+import argparse
+import math
+
+from PIL import Image
+from diffusers import AutoencoderKLHunyuanVideo
+from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
+from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
+from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
+from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
+from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
+from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
+from diffusers_helper.thread_utils import AsyncStream, async_run
+from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
+from transformers import SiglipImageProcessor, SiglipVisionModel
+from diffusers_helper.clip_vision import hf_clip_vision_encode
+from diffusers_helper.bucket_tools import find_nearest_bucket
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--share', action='store_true')
+parser.add_argument("--server", type=str, default='0.0.0.0')
+parser.add_argument("--port", type=int, required=False)
+parser.add_argument("--inbrowser", action='store_true')
+args = parser.parse_args()
+
+# for win desktop probably use --server 127.0.0.1 --inbrowser
+# For linux server probably use --server 127.0.0.1 or do not use any cmd flags
+
+print(args)
+
+free_mem_gb = get_cuda_free_memory_gb(gpu)
+high_vram = free_mem_gb > 60
+
+print(f'Free VRAM {free_mem_gb} GB')
+print(f'High-VRAM Mode: {high_vram}')
+
+text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
+text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
+tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
+tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
+vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()
+
+feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
+image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
+
+transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()
+
+vae.eval()
+text_encoder.eval()
+text_encoder_2.eval()
+image_encoder.eval()
+transformer.eval()
+
+if not high_vram:
+ vae.enable_slicing()
+ vae.enable_tiling()
+
+transformer.high_quality_fp32_output_for_inference = True
+print('transformer.high_quality_fp32_output_for_inference = True')
+
+transformer.to(dtype=torch.bfloat16)
+vae.to(dtype=torch.float16)
+image_encoder.to(dtype=torch.float16)
+text_encoder.to(dtype=torch.float16)
+text_encoder_2.to(dtype=torch.float16)
+
+vae.requires_grad_(False)
+text_encoder.requires_grad_(False)
+text_encoder_2.requires_grad_(False)
+image_encoder.requires_grad_(False)
+transformer.requires_grad_(False)
+
+if not high_vram:
+ # DynamicSwapInstaller works like huggingface's enable_sequential_cpu_offload but is roughly 3x faster
+ DynamicSwapInstaller.install_model(transformer, device=gpu)
+ DynamicSwapInstaller.install_model(text_encoder, device=gpu)
+else:
+ text_encoder.to(gpu)
+ text_encoder_2.to(gpu)
+ image_encoder.to(gpu)
+ vae.to(gpu)
+ transformer.to(gpu)
+
+stream = AsyncStream()
+
+outputs_folder = './outputs/'
+os.makedirs(outputs_folder, exist_ok=True)
+
+
+@torch.no_grad()
+def worker(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+ total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
+ total_latent_sections = int(max(round(total_latent_sections), 1))
+
+ job_id = generate_timestamp()
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
+
+ try:
+ # Clean GPU
+ if not high_vram:
+ unload_complete_models(
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
+ )
+
+ # Text encoding
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
+
+ if not high_vram:
+ fake_diffusers_current_device(text_encoder, gpu) # we only encode one prompt here, so offloading costs the same as a plain device move: one load and one encode either way.
+ load_model_as_complete(text_encoder_2, target_device=gpu)
+
+ llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
+
+ if cfg == 1:
+ llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
+ else:
+ llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
+
+ llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
+ llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
+
+ # Processing input image
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))
+
+ H, W, C = input_image.shape
+ height, width = find_nearest_bucket(H, W, resolution=640)
+ input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)
+
+ Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
+
+ input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
+ input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]
+
+ # VAE encoding
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))
+
+ if not high_vram:
+ load_model_as_complete(vae, target_device=gpu)
+
+ start_latent = vae_encode(input_image_pt, vae)
+
+ # CLIP Vision
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
+
+ if not high_vram:
+ load_model_as_complete(image_encoder, target_device=gpu)
+
+ image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
+ image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
+
+ # Dtype
+
+ llama_vec = llama_vec.to(transformer.dtype)
+ llama_vec_n = llama_vec_n.to(transformer.dtype)
+ clip_l_pooler = clip_l_pooler.to(transformer.dtype)
+ clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
+ image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
+
+ # Sampling
+
+ stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
+
+ rnd = torch.Generator("cpu").manual_seed(seed)
+
+ history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
+ history_pixels = None
+
+ history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
+ total_generated_latent_frames = 1
+
+ for section_index in range(total_latent_sections):
+ if stream.input_queue.top() == 'end':
+ stream.output_queue.push(('end', None))
+ return
+
+ print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
+
+ if not high_vram:
+ unload_complete_models()
+ move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
+
+ if use_teacache:
+ transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
+ else:
+ transformer.initialize_teacache(enable_teacache=False)
+
+ def callback(d):
+ preview = d['denoised']
+ preview = vae_decode_fake(preview)
+
+ preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
+ preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
+
+ if stream.input_queue.top() == 'end':
+ stream.output_queue.push(('end', None))
+ raise KeyboardInterrupt('User ends the task.')
+
+ current_step = d['i'] + 1
+ percentage = int(100.0 * current_step / steps)
+ hint = f'Sampling {current_step}/{steps}'
+ desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
+ stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
+ return
+
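+ # Time-axis index layout (F1 samples forward in time; matches the split sizes below):
+ # [1 start frame | 16 clean (4x) | 2 clean (2x) | 1 most recent clean frame | latent_window_size new latents]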
+ indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
+ clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
+ clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
+
+ clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
+ clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
+
+ generated_latents = sample_hunyuan(
+ transformer=transformer,
+ sampler='unipc',
+ width=width,
+ height=height,
+ frames=latent_window_size * 4 - 3,
+ real_guidance_scale=cfg,
+ distilled_guidance_scale=gs,
+ guidance_rescale=rs,
+ # shift=3.0,
+ num_inference_steps=steps,
+ generator=rnd,
+ prompt_embeds=llama_vec,
+ prompt_embeds_mask=llama_attention_mask,
+ prompt_poolers=clip_l_pooler,
+ negative_prompt_embeds=llama_vec_n,
+ negative_prompt_embeds_mask=llama_attention_mask_n,
+ negative_prompt_poolers=clip_l_pooler_n,
+ device=gpu,
+ dtype=torch.bfloat16,
+ image_embeddings=image_encoder_last_hidden_state,
+ latent_indices=latent_indices,
+ clean_latents=clean_latents,
+ clean_latent_indices=clean_latent_indices,
+ clean_latents_2x=clean_latents_2x,
+ clean_latent_2x_indices=clean_latent_2x_indices,
+ clean_latents_4x=clean_latents_4x,
+ clean_latent_4x_indices=clean_latent_4x_indices,
+ callback=callback,
+ )
+
+ total_generated_latent_frames += int(generated_latents.shape[2])
+ history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
+
+ if not high_vram:
+ offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
+ load_model_as_complete(vae, target_device=gpu)
+
+ real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
+
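+ # First section: decode the whole history. Later sections: decode only the tail and blend it onto the existing pixels across the overlapped frames to hide section seams.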
+ if history_pixels is None:
+ history_pixels = vae_decode(real_history_latents, vae).cpu()
+ else:
+ section_latent_frames = latent_window_size * 2
+ overlapped_frames = latent_window_size * 4 - 3
+
+ current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
+ history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
+
+ if not high_vram:
+ unload_complete_models()
+
+ output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
+
+ save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)
+
+ print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
+
+ stream.output_queue.push(('file', output_filename))
+ except:
+ traceback.print_exc()
+
+ if not high_vram:
+ unload_complete_models(
+ text_encoder, text_encoder_2, image_encoder, vae, transformer
+ )
+
+ stream.output_queue.push(('end', None))
+ return
+
+
+def process(input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
+ global stream
+ assert input_image is not None, 'No input image!'
+
+ yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
+
+ stream = AsyncStream()
+
+ async_run(worker, input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)
+
+ output_filename = None
+
+ while True:
+ flag, data = stream.output_queue.next()
+
+ if flag == 'file':
+ output_filename = data
+ yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
+
+ if flag == 'progress':
+ preview, desc, html = data
+ yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
+
+ if flag == 'end':
+ yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
+ break
+
+
+def end_process():
+ stream.input_queue.push('end')
+
+
+quick_prompts = [
+ 'The girl dances gracefully, with clear movements, full of charm.',
+ 'A character doing some simple body movements.',
+]
+quick_prompts = [[x] for x in quick_prompts]
+
+
+css = make_progress_bar_css()
+block = gr.Blocks(css=css).queue()
+with block:
+ gr.Markdown('# FramePack-F1')
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
+ prompt = gr.Textbox(label="Prompt", value='')
+ example_quick_prompts = gr.Dataset(samples=quick_prompts, label='Quick List', samples_per_page=1000, components=[prompt])
+ example_quick_prompts.click(lambda x: x[0], inputs=[example_quick_prompts], outputs=prompt, show_progress=False, queue=False)
+
+ with gr.Row():
+ start_button = gr.Button(value="Start Generation")
+ end_button = gr.Button(value="End Generation", interactive=False)
+
+ with gr.Group():
+ use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
+
+ n_prompt = gr.Textbox(label="Negative Prompt", value="", visible=False) # Not used
+ seed = gr.Number(label="Seed", value=31337, precision=0)
+
+ total_second_length = gr.Slider(label="Total Video Length (Seconds)", minimum=1, maximum=120, value=5, step=0.1)
+ latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, visible=False) # Should not change
+ steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Changing this value is not recommended.')
+
+ cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, visible=False) # Should not change
+ gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Changing this value is not recommended.')
+ rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01, visible=False) # Should not change
+
+ gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Increase this value if you encounter OOM; larger values run slower.")
+
+ mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
+
+ with gr.Column():
+ preview_image = gr.Image(label="Next Latents", height=200, visible=False)
+ result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
+ progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
+ progress_bar = gr.HTML('', elem_classes='no-generating-animation')
+
+ gr.HTML('')
+
+ ips = [input_image, prompt, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
+ start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
+ end_button.click(fn=end_process)
+
+
+block.launch(
+ server_name=args.server,
+ server_port=args.port,
+ share=args.share,
+ inbrowser=args.inbrowser,
+)
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/bucket_tools.py b/exp_code/1_benchmark/FramePack/diffusers_helper/bucket_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc13fdeb11f9ac87c64dda049a06b968360e7c3f
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/bucket_tools.py
@@ -0,0 +1,30 @@
+bucket_options = {
+ 640: [
+ (416, 960),
+ (448, 864),
+ (480, 832),
+ (512, 768),
+ (544, 704),
+ (576, 672),
+ (608, 640),
+ (640, 608),
+ (672, 576),
+ (704, 544),
+ (768, 512),
+ (832, 480),
+ (864, 448),
+ (960, 416),
+ ],
+}
+
+
+def find_nearest_bucket(h, w, resolution=640):
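+ # Pick the bucket whose aspect ratio is closest to h/w; the cross-multiplied metric avoids a division.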
+ min_metric = float('inf')
+ best_bucket = None
+ for (bucket_h, bucket_w) in bucket_options[resolution]:
+ metric = abs(h * bucket_w - w * bucket_h)
+ if metric <= min_metric:
+ min_metric = metric
+ best_bucket = (bucket_h, bucket_w)
+ return best_bucket
+
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/clip_vision.py b/exp_code/1_benchmark/FramePack/diffusers_helper/clip_vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..aaf40dbf1b4ef975640e0ad0d5a7792652d79334
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/clip_vision.py
@@ -0,0 +1,12 @@
+import numpy as np
+
+
+def hf_clip_vision_encode(image, feature_extractor, image_encoder):
+ assert isinstance(image, np.ndarray)
+ assert image.ndim == 3 and image.shape[2] == 3
+ assert image.dtype == np.uint8
+
+ preprocessed = feature_extractor.preprocess(images=image, return_tensors="pt").to(device=image_encoder.device, dtype=image_encoder.dtype)
+ image_encoder_output = image_encoder(**preprocessed)
+
+ return image_encoder_output
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/dit_common.py b/exp_code/1_benchmark/FramePack/diffusers_helper/dit_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..f02e7b012bff0b3b0fce9136d29fee4a1d49e45e
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/dit_common.py
@@ -0,0 +1,53 @@
+import torch
+import accelerate.accelerator
+
+from diffusers.models.normalization import RMSNorm, LayerNorm, FP32LayerNorm, AdaLayerNormContinuous
+
+
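+# Make accelerate's convert_outputs_to_fp32 a no-op so model outputs are not force-cast back to fp32.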
+accelerate.accelerator.convert_outputs_to_fp32 = lambda x: x
+
+
+def LayerNorm_forward(self, x):
+ return torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps).to(x)
+
+
+LayerNorm.forward = LayerNorm_forward
+torch.nn.LayerNorm.forward = LayerNorm_forward
+
+
+def FP32LayerNorm_forward(self, x):
+ origin_dtype = x.dtype
+ return torch.nn.functional.layer_norm(
+ x.float(),
+ self.normalized_shape,
+ self.weight.float() if self.weight is not None else None,
+ self.bias.float() if self.bias is not None else None,
+ self.eps,
+ ).to(origin_dtype)
+
+
+FP32LayerNorm.forward = FP32LayerNorm_forward
+
+
+def RMSNorm_forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
+
+ if self.weight is None:
+ return hidden_states.to(input_dtype)
+
+ return hidden_states.to(input_dtype) * self.weight.to(input_dtype)
+
+
+RMSNorm.forward = RMSNorm_forward
+
+
+def AdaLayerNormContinuous_forward(self, x, conditioning_embedding):
+ emb = self.linear(self.silu(conditioning_embedding))
+ scale, shift = emb.chunk(2, dim=1)
+ x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
+ return x
+
+
+AdaLayerNormContinuous.forward = AdaLayerNormContinuous_forward
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/gradio/progress_bar.py b/exp_code/1_benchmark/FramePack/diffusers_helper/gradio/progress_bar.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cc612163a171cef37d67d991d729ba9fec066db
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/gradio/progress_bar.py
@@ -0,0 +1,86 @@
+progress_html = '''
+<div class="loader-container">
+  <div class="loader"></div>
+  <div class="progress-container">
+    <progress value="*number*" max="100"></progress>
+  </div>
+  <span>*text*</span>
+</div>
+'''
+
+css = '''
+.loader-container {
+ display: flex; /* Use flex to align items horizontally */
+ align-items: center; /* Center items vertically within the container */
+ white-space: nowrap; /* Prevent line breaks within the container */
+}
+
+.loader {
+ border: 8px solid #f3f3f3; /* Light grey */
+ border-top: 8px solid #3498db; /* Blue */
+ border-radius: 50%;
+ width: 30px;
+ height: 30px;
+ animation: spin 2s linear infinite;
+}
+
+@keyframes spin {
+ 0% { transform: rotate(0deg); }
+ 100% { transform: rotate(360deg); }
+}
+
+/* Style the progress bar */
+progress {
+ appearance: none; /* Remove default styling */
+ height: 20px; /* Set the height of the progress bar */
+ border-radius: 5px; /* Round the corners of the progress bar */
+ background-color: #f3f3f3; /* Light grey background */
+ width: 100%;
+ vertical-align: middle !important;
+}
+
+/* Style the progress bar container */
+.progress-container {
+ margin-left: 20px;
+ margin-right: 20px;
+ flex-grow: 1; /* Allow the progress container to take up remaining space */
+}
+
+/* Set the color of the progress bar fill */
+progress::-webkit-progress-value {
+ background-color: #3498db; /* Blue color for the fill */
+}
+
+progress::-moz-progress-bar {
+ background-color: #3498db; /* Blue color for the fill in Firefox */
+}
+
+/* Style the text on the progress bar */
+progress::after {
+ content: attr(value '%'); /* Display the progress value followed by '%' */
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ color: white; /* Set text color */
+ font-size: 14px; /* Set font size */
+}
+
+/* Style other texts */
+.loader-container > span {
+ margin-left: 5px; /* Add spacing between the progress bar and the text */
+}
+
+.no-generating-animation > .generating {
+ display: none !important;
+}
+
+'''
+
+
+def make_progress_bar_html(number, text):
+ return progress_html.replace('*number*', str(number)).replace('*text*', text)
+
+
+def make_progress_bar_css():
+ return css
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/hf_login.py b/exp_code/1_benchmark/FramePack/diffusers_helper/hf_login.py
new file mode 100644
index 0000000000000000000000000000000000000000..b039db24378b0419e69ee97042f88e96460766ef
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/hf_login.py
@@ -0,0 +1,21 @@
+import os
+
+
+def login(token):
+ from huggingface_hub import login
+ import time
+
+ while True:
+ try:
+ login(token)
+ print('HF login ok.')
+ break
+ except Exception as e:
+ print(f'HF login failed: {e}. Retrying')
+ time.sleep(0.5)
+
+
+hf_token = os.environ.get('HF_TOKEN', None)
+
+if hf_token is not None:
+ login(hf_token)
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/hunyuan.py b/exp_code/1_benchmark/FramePack/diffusers_helper/hunyuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f5c8561f5701f201c3b22c182924e3b819e63bf
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/hunyuan.py
@@ -0,0 +1,111 @@
+import torch
+
+from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video import DEFAULT_PROMPT_TEMPLATE
+from diffusers_helper.utils import crop_or_pad_yield_mask
+
+
+@torch.no_grad()
+def encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2, max_length=256):
+ assert isinstance(prompt, str)
+
+ prompt = [prompt]
+
+ # LLAMA
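+ # The prompt is wrapped in HunyuanVideo's default LLaMA chat template; crop_start is the number of template tokens stripped from the encoder output below.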
+
+ prompt_llama = [DEFAULT_PROMPT_TEMPLATE["template"].format(p) for p in prompt]
+ crop_start = DEFAULT_PROMPT_TEMPLATE["crop_start"]
+
+ llama_inputs = tokenizer(
+ prompt_llama,
+ padding="max_length",
+ max_length=max_length + crop_start,
+ truncation=True,
+ return_tensors="pt",
+ return_length=False,
+ return_overflowing_tokens=False,
+ return_attention_mask=True,
+ )
+
+ llama_input_ids = llama_inputs.input_ids.to(text_encoder.device)
+ llama_attention_mask = llama_inputs.attention_mask.to(text_encoder.device)
+ llama_attention_length = int(llama_attention_mask.sum())
+
+ llama_outputs = text_encoder(
+ input_ids=llama_input_ids,
+ attention_mask=llama_attention_mask,
+ output_hidden_states=True,
+ )
+
+ llama_vec = llama_outputs.hidden_states[-3][:, crop_start:llama_attention_length]
+ # llama_vec_remaining = llama_outputs.hidden_states[-3][:, llama_attention_length:]
+ llama_attention_mask = llama_attention_mask[:, crop_start:llama_attention_length]
+
+ assert torch.all(llama_attention_mask.bool())
+
+ # CLIP
+
+ clip_l_input_ids = tokenizer_2(
+ prompt,
+ padding="max_length",
+ max_length=77,
+ truncation=True,
+ return_overflowing_tokens=False,
+ return_length=False,
+ return_tensors="pt",
+ ).input_ids
+ clip_l_pooler = text_encoder_2(clip_l_input_ids.to(text_encoder_2.device), output_hidden_states=False).pooler_output
+
+ return llama_vec, clip_l_pooler
+
+
+@torch.no_grad()
+def vae_decode_fake(latents):
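+ # Fast preview decode: project the 16 latent channels to RGB with a fixed linear map instead of running the full VAE.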
+ latent_rgb_factors = [
+ [-0.0395, -0.0331, 0.0445],
+ [0.0696, 0.0795, 0.0518],
+ [0.0135, -0.0945, -0.0282],
+ [0.0108, -0.0250, -0.0765],
+ [-0.0209, 0.0032, 0.0224],
+ [-0.0804, -0.0254, -0.0639],
+ [-0.0991, 0.0271, -0.0669],
+ [-0.0646, -0.0422, -0.0400],
+ [-0.0696, -0.0595, -0.0894],
+ [-0.0799, -0.0208, -0.0375],
+ [0.1166, 0.1627, 0.0962],
+ [0.1165, 0.0432, 0.0407],
+ [-0.2315, -0.1920, -0.1355],
+ [-0.0270, 0.0401, -0.0821],
+ [-0.0616, -0.0997, -0.0727],
+ [0.0249, -0.0469, -0.1703]
+ ] # From comfyui
+
+ latent_rgb_factors_bias = [0.0259, -0.0192, -0.0761]
+
+ weight = torch.tensor(latent_rgb_factors, device=latents.device, dtype=latents.dtype).transpose(0, 1)[:, :, None, None, None]
+ bias = torch.tensor(latent_rgb_factors_bias, device=latents.device, dtype=latents.dtype)
+
+ images = torch.nn.functional.conv3d(latents, weight, bias=bias, stride=1, padding=0, dilation=1, groups=1)
+ images = images.clamp(0.0, 1.0)
+
+ return images
+
+
+@torch.no_grad()
+def vae_decode(latents, vae, image_mode=False):
+ latents = latents / vae.config.scaling_factor
+
+ if not image_mode:
+ image = vae.decode(latents.to(device=vae.device, dtype=vae.dtype)).sample
+ else:
+ latents = latents.to(device=vae.device, dtype=vae.dtype).unbind(2)
+ image = [vae.decode(l.unsqueeze(2)).sample for l in latents]
+ image = torch.cat(image, dim=2)
+
+ return image
+
+
+@torch.no_grad()
+def vae_encode(image, vae):
+ latents = vae.encode(image.to(device=vae.device, dtype=vae.dtype)).latent_dist.sample()
+ latents = latents * vae.config.scaling_factor
+ return latents
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/uni_pc_fm.py b/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/uni_pc_fm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5763532a04fc81317b773c59c9878f213abe841
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/uni_pc_fm.py
@@ -0,0 +1,141 @@
+# Better Flow Matching UniPC by Lvmin Zhang
+# (c) 2025
+# CC BY-SA 4.0
+# Attribution-ShareAlike 4.0 International Licence
+
+
+import torch
+
+from tqdm.auto import trange
+
+
+def expand_dims(v, dims):
+ return v[(...,) + (None,) * (dims - 1)]
+
+
+class FlowMatchUniPC:
+ def __init__(self, model, extra_args, variant='bh1'):
+ self.model = model
+ self.variant = variant
+ self.extra_args = extra_args
+
+ def model_fn(self, x, t):
+ return self.model(x, t, **self.extra_args)
+
+ def update_fn(self, x, model_prev_list, t_prev_list, t, order):
+ assert order <= len(model_prev_list)
+ dims = x.dim()
+
+ t_prev_0 = t_prev_list[-1]
+ lambda_prev_0 = - torch.log(t_prev_0)
+ lambda_t = - torch.log(t)
+ model_prev_0 = model_prev_list[-1]
+
+ h = lambda_t - lambda_prev_0
+
+ rks = []
+ D1s = []
+ for i in range(1, order):
+ t_prev_i = t_prev_list[-(i + 1)]
+ model_prev_i = model_prev_list[-(i + 1)]
+ lambda_prev_i = - torch.log(t_prev_i)
+ rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
+ rks.append(rk)
+ D1s.append((model_prev_i - model_prev_0) / rk)
+
+ rks.append(1.)
+ rks = torch.tensor(rks, device=x.device)
+
+ R = []
+ b = []
+
+ hh = -h[0]
+ h_phi_1 = torch.expm1(hh)
+ h_phi_k = h_phi_1 / hh - 1
+
+ factorial_i = 1
+
+ if self.variant == 'bh1':
+ B_h = hh
+ elif self.variant == 'bh2':
+ B_h = torch.expm1(hh)
+ else:
+ raise NotImplementedError('Bad variant!')
+
+ for i in range(1, order + 1):
+ R.append(torch.pow(rks, i - 1))
+ b.append(h_phi_k * factorial_i / B_h)
+ factorial_i *= (i + 1)
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
+
+ R = torch.stack(R)
+ b = torch.tensor(b, device=x.device)
+
+ use_predictor = len(D1s) > 0
+
+ if use_predictor:
+ D1s = torch.stack(D1s, dim=1)
+ if order == 2:
+ rhos_p = torch.tensor([0.5], device=b.device)
+ else:
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
+ else:
+ D1s = None
+ rhos_p = None
+
+ if order == 1:
+ rhos_c = torch.tensor([0.5], device=b.device)
+ else:
+ rhos_c = torch.linalg.solve(R, b)
+
+ x_t_ = expand_dims(t / t_prev_0, dims) * x - expand_dims(h_phi_1, dims) * model_prev_0
+
+ if use_predictor:
+ pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0]))
+ else:
+ pred_res = 0
+
+ x_t = x_t_ - expand_dims(B_h, dims) * pred_res
+ model_t = self.model_fn(x_t, t)
+
+ if D1s is not None:
+ corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0]))
+ else:
+ corr_res = 0
+
+ D1_t = (model_t - model_prev_0)
+ x_t = x_t_ - expand_dims(B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
+
+ return x_t, model_t
+
+ def sample(self, x, sigmas, callback=None, disable_pbar=False):
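+ # Warm-up: use lower-order updates while fewer previous model outputs than the target order are available, then run the full-order predictor-corrector.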
+ order = min(3, len(sigmas) - 2)
+ model_prev_list, t_prev_list = [], []
+ for i in trange(len(sigmas) - 1, disable=disable_pbar):
+ vec_t = sigmas[i].expand(x.shape[0])
+
+ if i == 0:
+ model_prev_list = [self.model_fn(x, vec_t)]
+ t_prev_list = [vec_t]
+ elif i < order:
+ init_order = i
+ x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, init_order)
+ model_prev_list.append(model_x)
+ t_prev_list.append(vec_t)
+ else:
+ x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, order)
+ model_prev_list.append(model_x)
+ t_prev_list.append(vec_t)
+
+ model_prev_list = model_prev_list[-order:]
+ t_prev_list = t_prev_list[-order:]
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'denoised': model_prev_list[-1]})
+
+ return model_prev_list[-1]
+
+
+def sample_unipc(model, noise, sigmas, extra_args=None, callback=None, disable=False, variant='bh1'):
+ assert variant in ['bh1', 'bh2']
+ return FlowMatchUniPC(model, extra_args=extra_args, variant=variant).sample(noise, sigmas=sigmas, callback=callback, disable_pbar=disable)
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/wrapper.py b/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc420da4db1134deca30648077923021b35f82d1
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/k_diffusion/wrapper.py
@@ -0,0 +1,51 @@
+import torch
+
+
+def append_dims(x, target_dims):
+ return x[(...,) + (None,) * (target_dims - x.ndim)]
+
+
+def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0):
+ if guidance_rescale == 0:
+ return noise_cfg
+
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg
+ return noise_cfg
+
+
+def fm_wrapper(transformer, t_scale=1000.0):
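+ # Wraps the transformer for the sampler: applies classifier-free guidance (and optional rescaling) to the flow prediction, then converts it to an x0 estimate via x0 = x - sigma * pred.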
+ def k_model(x, sigma, **extra_args):
+ dtype = extra_args['dtype']
+ cfg_scale = extra_args['cfg_scale']
+ cfg_rescale = extra_args['cfg_rescale']
+ concat_latent = extra_args['concat_latent']
+
+ original_dtype = x.dtype
+ sigma = sigma.float()
+
+ x = x.to(dtype)
+ timestep = (sigma * t_scale).to(dtype)
+
+ if concat_latent is None:
+ hidden_states = x
+ else:
+ hidden_states = torch.cat([x, concat_latent.to(x)], dim=1)
+
+ pred_positive = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args['positive'])[0].float()
+
+ if cfg_scale == 1.0:
+ pred_negative = torch.zeros_like(pred_positive)
+ else:
+ pred_negative = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args['negative'])[0].float()
+
+ pred_cfg = pred_negative + cfg_scale * (pred_positive - pred_negative)
+ pred = rescale_noise_cfg(pred_cfg, pred_positive, guidance_rescale=cfg_rescale)
+
+ x0 = x.float() - pred.float() * append_dims(sigma, x.ndim)
+
+ return x0.to(dtype=original_dtype)
+
+ return k_model
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/memory.py b/exp_code/1_benchmark/FramePack/diffusers_helper/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..3380c538a185b0cbd07657ea475d0f5a0aeb17d3
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/memory.py
@@ -0,0 +1,134 @@
+# By lllyasviel
+
+
+import torch
+
+
+cpu = torch.device('cpu')
+gpu = torch.device(f'cuda:{torch.cuda.current_device()}')
+gpu_complete_modules = []
+
+
+class DynamicSwapInstaller:
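+ # Swaps a module's class at runtime so parameter/buffer lookups return tensors converted with .to(**kwargs) on access, letting weights stay offloaded until they are actually read.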
+ @staticmethod
+ def _install_module(module: torch.nn.Module, **kwargs):
+ original_class = module.__class__
+ module.__dict__['forge_backup_original_class'] = original_class
+
+ def hacked_get_attr(self, name: str):
+ if '_parameters' in self.__dict__:
+ _parameters = self.__dict__['_parameters']
+ if name in _parameters:
+ p = _parameters[name]
+ if p is None:
+ return None
+ if p.__class__ == torch.nn.Parameter:
+ return torch.nn.Parameter(p.to(**kwargs), requires_grad=p.requires_grad)
+ else:
+ return p.to(**kwargs)
+ if '_buffers' in self.__dict__:
+ _buffers = self.__dict__['_buffers']
+ if name in _buffers:
+ return _buffers[name].to(**kwargs)
+ return super(original_class, self).__getattr__(name)
+
+ module.__class__ = type('DynamicSwap_' + original_class.__name__, (original_class,), {
+ '__getattr__': hacked_get_attr,
+ })
+
+ return
+
+ @staticmethod
+ def _uninstall_module(module: torch.nn.Module):
+ if 'forge_backup_original_class' in module.__dict__:
+ module.__class__ = module.__dict__.pop('forge_backup_original_class')
+ return
+
+ @staticmethod
+ def install_model(model: torch.nn.Module, **kwargs):
+ for m in model.modules():
+ DynamicSwapInstaller._install_module(m, **kwargs)
+ return
+
+ @staticmethod
+ def uninstall_model(model: torch.nn.Module):
+ for m in model.modules():
+ DynamicSwapInstaller._uninstall_module(m)
+ return
+
+
+def fake_diffusers_current_device(model: torch.nn.Module, target_device: torch.device):
+ if hasattr(model, 'scale_shift_table'):
+ model.scale_shift_table.data = model.scale_shift_table.data.to(target_device)
+ return
+
+ for k, p in model.named_modules():
+ if hasattr(p, 'weight'):
+ p.to(target_device)
+ return
+
+
+def get_cuda_free_memory_gb(device=None):
+ if device is None:
+ device = gpu
+
+ memory_stats = torch.cuda.memory_stats(device)
+ bytes_active = memory_stats['active_bytes.all.current']
+ bytes_reserved = memory_stats['reserved_bytes.all.current']
+ bytes_free_cuda, _ = torch.cuda.mem_get_info(device)
+ bytes_inactive_reserved = bytes_reserved - bytes_active
+ bytes_total_available = bytes_free_cuda + bytes_inactive_reserved
+ return bytes_total_available / (1024 ** 3)
+
+
+def move_model_to_device_with_memory_preservation(model, target_device, preserved_memory_gb=0):
+ print(f'Moving {model.__class__.__name__} to {target_device} with preserved memory: {preserved_memory_gb} GB')
+
+ for m in model.modules():
+ if get_cuda_free_memory_gb(target_device) <= preserved_memory_gb:
+ torch.cuda.empty_cache()
+ return
+
+ if hasattr(m, 'weight'):
+ m.to(device=target_device)
+
+ model.to(device=target_device)
+ torch.cuda.empty_cache()
+ return
+
+
+def offload_model_from_device_for_memory_preservation(model, target_device, preserved_memory_gb=0):
+ print(f'Offloading {model.__class__.__name__} from {target_device} to preserve memory: {preserved_memory_gb} GB')
+
+ for m in model.modules():
+ if get_cuda_free_memory_gb(target_device) >= preserved_memory_gb:
+ torch.cuda.empty_cache()
+ return
+
+ if hasattr(m, 'weight'):
+ m.to(device=cpu)
+
+ model.to(device=cpu)
+ torch.cuda.empty_cache()
+ return
+
+
+def unload_complete_models(*args):
+ for m in gpu_complete_modules + list(args):
+ m.to(device=cpu)
+ print(f'Unloaded {m.__class__.__name__} as complete.')
+
+ gpu_complete_modules.clear()
+ torch.cuda.empty_cache()
+ return
+
+
+def load_model_as_complete(model, target_device, unload=True):
+ if unload:
+ unload_complete_models()
+
+ model.to(device=target_device)
+ print(f'Loaded {model.__class__.__name__} to {target_device} as complete.')
+
+ gpu_complete_modules.append(model)
+ return
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/models/hunyuan_video_packed.py b/exp_code/1_benchmark/FramePack/diffusers_helper/models/hunyuan_video_packed.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb42abcb52c20a457e50aa066ec1d4a89c6d57f
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/models/hunyuan_video_packed.py
@@ -0,0 +1,1035 @@
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import einops
+import torch.nn as nn
+import numpy as np
+
+from diffusers.loaders import FromOriginalModelMixin
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders import PeftAdapterMixin
+from diffusers.utils import logging
+from diffusers.models.attention import FeedForward
+from diffusers.models.attention_processor import Attention
+from diffusers.models.embeddings import TimestepEmbedding, Timesteps, PixArtAlphaTextProjection
+from diffusers.models.modeling_outputs import Transformer2DModelOutput
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers_helper.dit_common import LayerNorm
+from diffusers_helper.utils import zero_module
+
+
+enabled_backends = []
+
+if torch.backends.cuda.flash_sdp_enabled():
+ enabled_backends.append("flash")
+if torch.backends.cuda.math_sdp_enabled():
+ enabled_backends.append("math")
+if torch.backends.cuda.mem_efficient_sdp_enabled():
+ enabled_backends.append("mem_efficient")
+if torch.backends.cuda.cudnn_sdp_enabled():
+ enabled_backends.append("cudnn")
+
+print("Currently enabled native sdp backends:", enabled_backends)
+
+try:
+ # raise NotImplementedError
+ from xformers.ops import memory_efficient_attention as xformers_attn_func
+ print('Xformers is installed!')
+except:
+ print('Xformers is not installed!')
+ xformers_attn_func = None
+
+try:
+ # raise NotImplementedError
+ from flash_attn import flash_attn_varlen_func, flash_attn_func
+ print('Flash Attn is installed!')
+except:
+ print('Flash Attn is not installed!')
+ flash_attn_varlen_func = None
+ flash_attn_func = None
+
+try:
+ # raise NotImplementedError
+ from sageattention import sageattn_varlen, sageattn
+ print('Sage Attn is installed!')
+except:
+ print('Sage Attn is not installed!')
+ sageattn_varlen = None
+ sageattn = None
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def pad_for_3d_conv(x, kernel_size):
+ b, c, t, h, w = x.shape
+ pt, ph, pw = kernel_size
+ pad_t = (pt - (t % pt)) % pt
+ pad_h = (ph - (h % ph)) % ph
+ pad_w = (pw - (w % pw)) % pw
+ return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, pad_t), mode='replicate')
+
+
+def center_down_sample_3d(x, kernel_size):
+ # pt, ph, pw = kernel_size
+ # cp = (pt * ph * pw) // 2
+ # xp = einops.rearrange(x, 'b c (t pt) (h ph) (w pw) -> (pt ph pw) b c t h w', pt=pt, ph=ph, pw=pw)
+ # xc = xp[cp]
+ # return xc
+ return torch.nn.functional.avg_pool3d(x, kernel_size, stride=kernel_size)
+
+
+def get_cu_seqlens(text_mask, img_len):
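+ # Cumulative sequence lengths for var-len attention: for each sample, one boundary after its valid text + image tokens and one at the end of its padded row.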
+ batch_size = text_mask.shape[0]
+ text_len = text_mask.sum(dim=1)
+ max_len = text_mask.shape[1] + img_len
+
+ cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device="cuda")
+
+ for i in range(batch_size):
+ s = text_len[i] + img_len
+ s1 = i * max_len + s
+ s2 = (i + 1) * max_len
+ cu_seqlens[2 * i + 1] = s1
+ cu_seqlens[2 * i + 2] = s2
+
+ return cu_seqlens
+
+
+def apply_rotary_emb_transposed(x, freqs_cis):
+ cos, sin = freqs_cis.unsqueeze(-2).chunk(2, dim=-1)
+ x_real, x_imag = x.unflatten(-1, (-1, 2)).unbind(-1)
+ x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
+ out = x.float() * cos + x_rotated.float() * sin
+ out = out.to(x)
+ return out
+
+
+def attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv):
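+ # Dense path (no cu_seqlens given): prefer Sage, then Flash, then xformers, then PyTorch SDPA. The var-len kernels below handle the padded multi-sample case.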
+ if cu_seqlens_q is None and cu_seqlens_kv is None and max_seqlen_q is None and max_seqlen_kv is None:
+ if sageattn is not None:
+ x = sageattn(q, k, v, tensor_layout='NHD')
+ return x
+
+ if flash_attn_func is not None:
+ x = flash_attn_func(q, k, v)
+ return x
+
+ if xformers_attn_func is not None:
+ x = xformers_attn_func(q, k, v)
+ return x
+
+ x = torch.nn.functional.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)).transpose(1, 2)
+ return x
+
+ B, L, H, C = q.shape
+
+ q = q.flatten(0, 1)
+ k = k.flatten(0, 1)
+ v = v.flatten(0, 1)
+
+ if sageattn_varlen is not None:
+ x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
+ elif flash_attn_varlen_func is not None:
+ x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
+ else:
+ raise NotImplementedError('No Attn Installed!')
+
+ x = x.unflatten(0, (B, L))
+
+ return x
+
+
+class HunyuanAttnProcessorFlashAttnDouble:
+ def __call__(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb):
+ cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask
+
+ query = attn.to_q(hidden_states)
+ key = attn.to_k(hidden_states)
+ value = attn.to_v(hidden_states)
+
+ query = query.unflatten(2, (attn.heads, -1))
+ key = key.unflatten(2, (attn.heads, -1))
+ value = value.unflatten(2, (attn.heads, -1))
+
+ query = attn.norm_q(query)
+ key = attn.norm_k(key)
+
+ query = apply_rotary_emb_transposed(query, image_rotary_emb)
+ key = apply_rotary_emb_transposed(key, image_rotary_emb)
+
+ encoder_query = attn.add_q_proj(encoder_hidden_states)
+ encoder_key = attn.add_k_proj(encoder_hidden_states)
+ encoder_value = attn.add_v_proj(encoder_hidden_states)
+
+ encoder_query = encoder_query.unflatten(2, (attn.heads, -1))
+ encoder_key = encoder_key.unflatten(2, (attn.heads, -1))
+ encoder_value = encoder_value.unflatten(2, (attn.heads, -1))
+
+ encoder_query = attn.norm_added_q(encoder_query)
+ encoder_key = attn.norm_added_k(encoder_key)
+
+ query = torch.cat([query, encoder_query], dim=1)
+ key = torch.cat([key, encoder_key], dim=1)
+ value = torch.cat([value, encoder_value], dim=1)
+
+ hidden_states = attn_varlen_func(query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
+ hidden_states = hidden_states.flatten(-2)
+
+ txt_length = encoder_hidden_states.shape[1]
+ hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:]
+
+ hidden_states = attn.to_out[0](hidden_states)
+ hidden_states = attn.to_out[1](hidden_states)
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
+
+ return hidden_states, encoder_hidden_states
+
+
+class HunyuanAttnProcessorFlashAttnSingle:
+ def __call__(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb):
+ cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask
+
+ hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
+
+ query = attn.to_q(hidden_states)
+ key = attn.to_k(hidden_states)
+ value = attn.to_v(hidden_states)
+
+ query = query.unflatten(2, (attn.heads, -1))
+ key = key.unflatten(2, (attn.heads, -1))
+ value = value.unflatten(2, (attn.heads, -1))
+
+ query = attn.norm_q(query)
+ key = attn.norm_k(key)
+
+ txt_length = encoder_hidden_states.shape[1]
+
+ query = torch.cat([apply_rotary_emb_transposed(query[:, :-txt_length], image_rotary_emb), query[:, -txt_length:]], dim=1)
+ key = torch.cat([apply_rotary_emb_transposed(key[:, :-txt_length], image_rotary_emb), key[:, -txt_length:]], dim=1)
+
+ hidden_states = attn_varlen_func(query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv)
+ hidden_states = hidden_states.flatten(-2)
+
+ hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:]
+
+ return hidden_states, encoder_hidden_states
+
+
+class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module):
+ def __init__(self, embedding_dim, pooled_projection_dim):
+ super().__init__()
+
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
+ self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
+ self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")
+
+ def forward(self, timestep, guidance, pooled_projection):
+ timesteps_proj = self.time_proj(timestep)
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype))
+
+ guidance_proj = self.time_proj(guidance)
+ guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype))
+
+ time_guidance_emb = timesteps_emb + guidance_emb
+
+ pooled_projections = self.text_embedder(pooled_projection)
+ conditioning = time_guidance_emb + pooled_projections
+
+ return conditioning
+
+
+class CombinedTimestepTextProjEmbeddings(nn.Module):
+ def __init__(self, embedding_dim, pooled_projection_dim):
+ super().__init__()
+
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
+ self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu")
+
+ def forward(self, timestep, pooled_projection):
+ timesteps_proj = self.time_proj(timestep)
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype))
+
+ pooled_projections = self.text_embedder(pooled_projection)
+
+ conditioning = timesteps_emb + pooled_projections
+
+ return conditioning
+
+
+class HunyuanVideoAdaNorm(nn.Module):
+ def __init__(self, in_features: int, out_features: Optional[int] = None) -> None:
+ super().__init__()
+
+ out_features = out_features or 2 * in_features
+ self.linear = nn.Linear(in_features, out_features)
+ self.nonlinearity = nn.SiLU()
+
+ def forward(
+ self, temb: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ temb = self.linear(self.nonlinearity(temb))
+ gate_msa, gate_mlp = temb.chunk(2, dim=-1)
+ gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1)
+ return gate_msa, gate_mlp
+
+
+class HunyuanVideoIndividualTokenRefinerBlock(nn.Module):
+ def __init__(
+ self,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ mlp_width_ratio: str = 4.0,
+ mlp_drop_rate: float = 0.0,
+ attention_bias: bool = True,
+ ) -> None:
+ super().__init__()
+
+ hidden_size = num_attention_heads * attention_head_dim
+
+ self.norm1 = LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6)
+ self.attn = Attention(
+ query_dim=hidden_size,
+ cross_attention_dim=None,
+ heads=num_attention_heads,
+ dim_head=attention_head_dim,
+ bias=attention_bias,
+ )
+
+ self.norm2 = LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6)
+ self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate)
+
+ self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ temb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ norm_hidden_states = self.norm1(hidden_states)
+
+ attn_output = self.attn(
+ hidden_states=norm_hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=attention_mask,
+ )
+
+ gate_msa, gate_mlp = self.norm_out(temb)
+ hidden_states = hidden_states + attn_output * gate_msa
+
+ ff_output = self.ff(self.norm2(hidden_states))
+ hidden_states = hidden_states + ff_output * gate_mlp
+
+ return hidden_states
+
+
+class HunyuanVideoIndividualTokenRefiner(nn.Module):
+ def __init__(
+ self,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ num_layers: int,
+ mlp_width_ratio: float = 4.0,
+ mlp_drop_rate: float = 0.0,
+ attention_bias: bool = True,
+ ) -> None:
+ super().__init__()
+
+ self.refiner_blocks = nn.ModuleList(
+ [
+ HunyuanVideoIndividualTokenRefinerBlock(
+ num_attention_heads=num_attention_heads,
+ attention_head_dim=attention_head_dim,
+ mlp_width_ratio=mlp_width_ratio,
+ mlp_drop_rate=mlp_drop_rate,
+ attention_bias=attention_bias,
+ )
+ for _ in range(num_layers)
+ ]
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ temb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ ) -> None:
+ self_attn_mask = None
+ if attention_mask is not None:
+ batch_size = attention_mask.shape[0]
+ seq_len = attention_mask.shape[1]
+ attention_mask = attention_mask.to(hidden_states.device).bool()
+ self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1)
+ self_attn_mask_2 = self_attn_mask_1.transpose(2, 3)
+ self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool()
+ self_attn_mask[:, :, :, 0] = True
+
+ for block in self.refiner_blocks:
+ hidden_states = block(hidden_states, temb, self_attn_mask)
+
+ return hidden_states
+
+
+class HunyuanVideoTokenRefiner(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ num_layers: int,
+ mlp_ratio: float = 4.0,
+ mlp_drop_rate: float = 0.0,
+ attention_bias: bool = True,
+ ) -> None:
+ super().__init__()
+
+ hidden_size = num_attention_heads * attention_head_dim
+
+ self.time_text_embed = CombinedTimestepTextProjEmbeddings(
+ embedding_dim=hidden_size, pooled_projection_dim=in_channels
+ )
+ self.proj_in = nn.Linear(in_channels, hidden_size, bias=True)
+ self.token_refiner = HunyuanVideoIndividualTokenRefiner(
+ num_attention_heads=num_attention_heads,
+ attention_head_dim=attention_head_dim,
+ num_layers=num_layers,
+ mlp_width_ratio=mlp_ratio,
+ mlp_drop_rate=mlp_drop_rate,
+ attention_bias=attention_bias,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ timestep: torch.LongTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ) -> torch.Tensor:
+ if attention_mask is None:
+ pooled_projections = hidden_states.mean(dim=1)
+ else:
+ original_dtype = hidden_states.dtype
+ mask_float = attention_mask.float().unsqueeze(-1)
+ pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1)
+ pooled_projections = pooled_projections.to(original_dtype)
+
+ temb = self.time_text_embed(timestep, pooled_projections)
+ hidden_states = self.proj_in(hidden_states)
+ hidden_states = self.token_refiner(hidden_states, temb, attention_mask)
+
+ return hidden_states
+
+
+class HunyuanVideoRotaryPosEmbed(nn.Module):
+ def __init__(self, rope_dim, theta):
+ super().__init__()
+ self.DT, self.DY, self.DX = rope_dim
+ self.theta = theta
+
+ @torch.no_grad()
+ def get_frequency(self, dim, pos):
+ T, H, W = pos.shape
+ freqs = 1.0 / (self.theta ** (torch.arange(0, dim, 2, dtype=torch.float32, device=pos.device)[: (dim // 2)] / dim))
+ freqs = torch.outer(freqs, pos.reshape(-1)).unflatten(-1, (T, H, W)).repeat_interleave(2, dim=0)
+ return freqs.cos(), freqs.sin()
+
+ @torch.no_grad()
+ def forward_inner(self, frame_indices, height, width, device):
+ GT, GY, GX = torch.meshgrid(
+ frame_indices.to(device=device, dtype=torch.float32),
+ torch.arange(0, height, device=device, dtype=torch.float32),
+ torch.arange(0, width, device=device, dtype=torch.float32),
+ indexing="ij"
+ )
+
+ FCT, FST = self.get_frequency(self.DT, GT)
+ FCY, FSY = self.get_frequency(self.DY, GY)
+ FCX, FSX = self.get_frequency(self.DX, GX)
+
+ result = torch.cat([FCT, FCY, FCX, FST, FSY, FSX], dim=0)
+
+ return result.to(device)
+
+ @torch.no_grad()
+ def forward(self, frame_indices, height, width, device):
+ frame_indices = frame_indices.unbind(0)
+ results = [self.forward_inner(f, height, width, device) for f in frame_indices]
+ results = torch.stack(results, dim=0)
+ return results
+
+
+class AdaLayerNormZero(nn.Module):
+ def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
+ super().__init__()
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias)
+ if norm_type == "layer_norm":
+ self.norm = LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
+ else:
+ raise ValueError(f"unknown norm_type {norm_type}")
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ emb: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ emb = emb.unsqueeze(-2)
+ emb = self.linear(self.silu(emb))
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=-1)
+ x = self.norm(x) * (1 + scale_msa) + shift_msa
+ return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
+
+
+class AdaLayerNormZeroSingle(nn.Module):
+ def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
+ super().__init__()
+
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias)
+ if norm_type == "layer_norm":
+ self.norm = LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
+ else:
+ raise ValueError(f"unknown norm_type {norm_type}")
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ emb: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ emb = emb.unsqueeze(-2)
+ emb = self.linear(self.silu(emb))
+ shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=-1)
+ x = self.norm(x) * (1 + scale_msa) + shift_msa
+ return x, gate_msa
+
+
+class AdaLayerNormContinuous(nn.Module):
+ def __init__(
+ self,
+ embedding_dim: int,
+ conditioning_embedding_dim: int,
+ elementwise_affine=True,
+ eps=1e-5,
+ bias=True,
+ norm_type="layer_norm",
+ ):
+ super().__init__()
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
+ if norm_type == "layer_norm":
+ self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
+ else:
+ raise ValueError(f"unknown norm_type {norm_type}")
+
+ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
+ emb = emb.unsqueeze(-2)
+ emb = self.linear(self.silu(emb))
+ scale, shift = emb.chunk(2, dim=-1)
+ x = self.norm(x) * (1 + scale) + shift
+ return x
+
+
+class HunyuanVideoSingleTransformerBlock(nn.Module):
+ def __init__(
+ self,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ mlp_ratio: float = 4.0,
+ qk_norm: str = "rms_norm",
+ ) -> None:
+ super().__init__()
+
+ hidden_size = num_attention_heads * attention_head_dim
+ mlp_dim = int(hidden_size * mlp_ratio)
+
+ self.attn = Attention(
+ query_dim=hidden_size,
+ cross_attention_dim=None,
+ dim_head=attention_head_dim,
+ heads=num_attention_heads,
+ out_dim=hidden_size,
+ bias=True,
+ processor=HunyuanAttnProcessorFlashAttnSingle(),
+ qk_norm=qk_norm,
+ eps=1e-6,
+ pre_only=True,
+ )
+
+ self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm")
+ self.proj_mlp = nn.Linear(hidden_size, mlp_dim)
+ self.act_mlp = nn.GELU(approximate="tanh")
+ self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ temb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ ) -> torch.Tensor:
+ text_seq_length = encoder_hidden_states.shape[1]
+ hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
+
+ residual = hidden_states
+
+ # 1. Input normalization
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
+
+ norm_hidden_states, norm_encoder_hidden_states = (
+ norm_hidden_states[:, :-text_seq_length, :],
+ norm_hidden_states[:, -text_seq_length:, :],
+ )
+
+ # 2. Attention
+ attn_output, context_attn_output = self.attn(
+ hidden_states=norm_hidden_states,
+ encoder_hidden_states=norm_encoder_hidden_states,
+ attention_mask=attention_mask,
+ image_rotary_emb=image_rotary_emb,
+ )
+ attn_output = torch.cat([attn_output, context_attn_output], dim=1)
+
+ # 3. Modulation and residual connection
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
+ hidden_states = gate * self.proj_out(hidden_states)
+ hidden_states = hidden_states + residual
+
+ hidden_states, encoder_hidden_states = (
+ hidden_states[:, :-text_seq_length, :],
+ hidden_states[:, -text_seq_length:, :],
+ )
+ return hidden_states, encoder_hidden_states
+
+
+class HunyuanVideoTransformerBlock(nn.Module):
+ def __init__(
+ self,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ mlp_ratio: float,
+ qk_norm: str = "rms_norm",
+ ) -> None:
+ super().__init__()
+
+ hidden_size = num_attention_heads * attention_head_dim
+
+ self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm")
+ self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm")
+
+ self.attn = Attention(
+ query_dim=hidden_size,
+ cross_attention_dim=None,
+ added_kv_proj_dim=hidden_size,
+ dim_head=attention_head_dim,
+ heads=num_attention_heads,
+ out_dim=hidden_size,
+ context_pre_only=False,
+ bias=True,
+ processor=HunyuanAttnProcessorFlashAttnDouble(),
+ qk_norm=qk_norm,
+ eps=1e-6,
+ )
+
+ self.norm2 = LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")
+
+ self.norm2_context = LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ temb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ # 1. Input normalization
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(encoder_hidden_states, emb=temb)
+
+ # 2. Joint attention
+ attn_output, context_attn_output = self.attn(
+ hidden_states=norm_hidden_states,
+ encoder_hidden_states=norm_encoder_hidden_states,
+ attention_mask=attention_mask,
+ image_rotary_emb=freqs_cis,
+ )
+
+ # 3. Modulation and residual connection
+ hidden_states = hidden_states + attn_output * gate_msa
+ encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa
+
+ norm_hidden_states = self.norm2(hidden_states)
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
+
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp) + c_shift_mlp
+
+ # 4. Feed-forward
+ ff_output = self.ff(norm_hidden_states)
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
+
+ hidden_states = hidden_states + gate_mlp * ff_output
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp * context_ff_output
+
+ return hidden_states, encoder_hidden_states
+
+
+class ClipVisionProjection(nn.Module):
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+ self.up = nn.Linear(in_channels, out_channels * 3)
+ self.down = nn.Linear(out_channels * 3, out_channels)
+
+ def forward(self, x):
+ projected_x = self.down(nn.functional.silu(self.up(x)))
+ return projected_x
+
+
+class HunyuanVideoPatchEmbed(nn.Module):
+ def __init__(self, patch_size, in_chans, embed_dim):
+ super().__init__()
+ self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+
+class HunyuanVideoPatchEmbedForCleanLatents(nn.Module):
+ def __init__(self, inner_dim):
+ super().__init__()
+ self.proj = nn.Conv3d(16, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2))
+ self.proj_2x = nn.Conv3d(16, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4))
+ self.proj_4x = nn.Conv3d(16, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8))
+
+ @torch.no_grad()
+ def initialize_weight_from_another_conv3d(self, another_layer):
+ weight = another_layer.weight.detach().clone()
+ bias = another_layer.bias.detach().clone()
+
+ sd = {
+ 'proj.weight': weight.clone(),
+ 'proj.bias': bias.clone(),
+ 'proj_2x.weight': einops.repeat(weight, 'b c t h w -> b c (t tk) (h hk) (w wk)', tk=2, hk=2, wk=2) / 8.0,
+ 'proj_2x.bias': bias.clone(),
+ 'proj_4x.weight': einops.repeat(weight, 'b c t h w -> b c (t tk) (h hk) (w wk)', tk=4, hk=4, wk=4) / 64.0,
+ 'proj_4x.bias': bias.clone(),
+ }
+
+ sd = {k: v.clone() for k, v in sd.items()}
+
+ self.load_state_dict(sd)
+ return
+
+
+class HunyuanVideoTransformer3DModelPacked(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+ @register_to_config
+ def __init__(
+ self,
+ in_channels: int = 16,
+ out_channels: int = 16,
+ num_attention_heads: int = 24,
+ attention_head_dim: int = 128,
+ num_layers: int = 20,
+ num_single_layers: int = 40,
+ num_refiner_layers: int = 2,
+ mlp_ratio: float = 4.0,
+ patch_size: int = 2,
+ patch_size_t: int = 1,
+ qk_norm: str = "rms_norm",
+ guidance_embeds: bool = True,
+ text_embed_dim: int = 4096,
+ pooled_projection_dim: int = 768,
+ rope_theta: float = 256.0,
+ rope_axes_dim: Tuple[int] = (16, 56, 56),
+ has_image_proj=False,
+ image_proj_dim=1152,
+ has_clean_x_embedder=False,
+ ) -> None:
+ super().__init__()
+
+ inner_dim = num_attention_heads * attention_head_dim
+ out_channels = out_channels or in_channels
+
+ # 1. Latent and condition embedders
+ self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim)
+ self.context_embedder = HunyuanVideoTokenRefiner(
+ text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers
+ )
+ self.time_text_embed = CombinedTimestepGuidanceTextProjEmbeddings(inner_dim, pooled_projection_dim)
+
+ self.clean_x_embedder = None
+ self.image_projection = None
+
+ # 2. RoPE
+ self.rope = HunyuanVideoRotaryPosEmbed(rope_axes_dim, rope_theta)
+
+ # 3. Dual stream transformer blocks
+ self.transformer_blocks = nn.ModuleList(
+ [
+ HunyuanVideoTransformerBlock(
+ num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm
+ )
+ for _ in range(num_layers)
+ ]
+ )
+
+ # 4. Single stream transformer blocks
+ self.single_transformer_blocks = nn.ModuleList(
+ [
+ HunyuanVideoSingleTransformerBlock(
+ num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm
+ )
+ for _ in range(num_single_layers)
+ ]
+ )
+
+ # 5. Output projection
+ self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6)
+ self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels)
+
+ self.inner_dim = inner_dim
+ self.use_gradient_checkpointing = False
+ self.enable_teacache = False
+
+ if has_image_proj:
+ self.install_image_projection(image_proj_dim)
+
+ if has_clean_x_embedder:
+ self.install_clean_x_embedder()
+
+ self.high_quality_fp32_output_for_inference = False
+
+ def install_image_projection(self, in_channels):
+ self.image_projection = ClipVisionProjection(in_channels=in_channels, out_channels=self.inner_dim)
+ self.config['has_image_proj'] = True
+ self.config['image_proj_dim'] = in_channels
+
+ def install_clean_x_embedder(self):
+ self.clean_x_embedder = HunyuanVideoPatchEmbedForCleanLatents(self.inner_dim)
+ self.config['has_clean_x_embedder'] = True
+
+ def enable_gradient_checkpointing(self):
+ self.use_gradient_checkpointing = True
+ print('self.use_gradient_checkpointing = True')
+
+ def disable_gradient_checkpointing(self):
+ self.use_gradient_checkpointing = False
+ print('self.use_gradient_checkpointing = False')
+
+ def initialize_teacache(self, enable_teacache=True, num_steps=25, rel_l1_thresh=0.15):
+ self.enable_teacache = enable_teacache
+ self.cnt = 0
+ self.num_steps = num_steps
+ self.rel_l1_thresh = rel_l1_thresh # 0.1 for 1.6x speedup, 0.15 for 2.1x speedup
+ self.accumulated_rel_l1_distance = 0
+ self.previous_modulated_input = None
+ self.previous_residual = None
+ self.teacache_rescale_func = np.poly1d([7.33226126e+02, -4.01131952e+02, 6.75869174e+01, -3.14987800e+00, 9.61237896e-02])
+
+ def gradient_checkpointing_method(self, block, *args):
+ if self.use_gradient_checkpointing:
+ result = torch.utils.checkpoint.checkpoint(block, *args, use_reentrant=False)
+ else:
+ result = block(*args)
+ return result
+
+ def process_input_hidden_states(
+ self,
+ latents, latent_indices=None,
+ clean_latents=None, clean_latent_indices=None,
+ clean_latents_2x=None, clean_latent_2x_indices=None,
+ clean_latents_4x=None, clean_latent_4x_indices=None
+ ):
+ hidden_states = self.gradient_checkpointing_method(self.x_embedder.proj, latents)
+ B, C, T, H, W = hidden_states.shape
+
+ if latent_indices is None:
+ latent_indices = torch.arange(0, T).unsqueeze(0).expand(B, -1)
+
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
+
+ rope_freqs = self.rope(frame_indices=latent_indices, height=H, width=W, device=hidden_states.device)
+ rope_freqs = rope_freqs.flatten(2).transpose(1, 2)
+
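+ # Prepend multi-scale "clean" context: full-resolution, 2x- and 4x-downsampled history latents are patchified with larger strides and placed in front of the noisy tokens, with RoPE frequencies pooled to match.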
+ if clean_latents is not None and clean_latent_indices is not None:
+ clean_latents = clean_latents.to(hidden_states)
+ clean_latents = self.gradient_checkpointing_method(self.clean_x_embedder.proj, clean_latents)
+ clean_latents = clean_latents.flatten(2).transpose(1, 2)
+
+ clean_latent_rope_freqs = self.rope(frame_indices=clean_latent_indices, height=H, width=W, device=clean_latents.device)
+ clean_latent_rope_freqs = clean_latent_rope_freqs.flatten(2).transpose(1, 2)
+
+ hidden_states = torch.cat([clean_latents, hidden_states], dim=1)
+ rope_freqs = torch.cat([clean_latent_rope_freqs, rope_freqs], dim=1)
+
+ if clean_latents_2x is not None and clean_latent_2x_indices is not None:
+ clean_latents_2x = clean_latents_2x.to(hidden_states)
+ clean_latents_2x = pad_for_3d_conv(clean_latents_2x, (2, 4, 4))
+ clean_latents_2x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_2x, clean_latents_2x)
+ clean_latents_2x = clean_latents_2x.flatten(2).transpose(1, 2)
+
+ clean_latent_2x_rope_freqs = self.rope(frame_indices=clean_latent_2x_indices, height=H, width=W, device=clean_latents_2x.device)
+ clean_latent_2x_rope_freqs = pad_for_3d_conv(clean_latent_2x_rope_freqs, (2, 2, 2))
+ clean_latent_2x_rope_freqs = center_down_sample_3d(clean_latent_2x_rope_freqs, (2, 2, 2))
+ clean_latent_2x_rope_freqs = clean_latent_2x_rope_freqs.flatten(2).transpose(1, 2)
+
+ hidden_states = torch.cat([clean_latents_2x, hidden_states], dim=1)
+ rope_freqs = torch.cat([clean_latent_2x_rope_freqs, rope_freqs], dim=1)
+
+ if clean_latents_4x is not None and clean_latent_4x_indices is not None:
+ clean_latents_4x = clean_latents_4x.to(hidden_states)
+ clean_latents_4x = pad_for_3d_conv(clean_latents_4x, (4, 8, 8))
+ clean_latents_4x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_4x, clean_latents_4x)
+ clean_latents_4x = clean_latents_4x.flatten(2).transpose(1, 2)
+
+ clean_latent_4x_rope_freqs = self.rope(frame_indices=clean_latent_4x_indices, height=H, width=W, device=clean_latents_4x.device)
+ clean_latent_4x_rope_freqs = pad_for_3d_conv(clean_latent_4x_rope_freqs, (4, 4, 4))
+ clean_latent_4x_rope_freqs = center_down_sample_3d(clean_latent_4x_rope_freqs, (4, 4, 4))
+ clean_latent_4x_rope_freqs = clean_latent_4x_rope_freqs.flatten(2).transpose(1, 2)
+
+ hidden_states = torch.cat([clean_latents_4x, hidden_states], dim=1)
+ rope_freqs = torch.cat([clean_latent_4x_rope_freqs, rope_freqs], dim=1)
+
+ return hidden_states, rope_freqs
+
+ def forward(
+ self,
+ hidden_states, timestep, encoder_hidden_states, encoder_attention_mask, pooled_projections, guidance,
+ latent_indices=None,
+ clean_latents=None, clean_latent_indices=None,
+ clean_latents_2x=None, clean_latent_2x_indices=None,
+ clean_latents_4x=None, clean_latent_4x_indices=None,
+ image_embeddings=None,
+ attention_kwargs=None, return_dict=True
+ ):
+
+ if attention_kwargs is None:
+ attention_kwargs = {}
+
+ batch_size, num_channels, num_frames, height, width = hidden_states.shape
+ p, p_t = self.config['patch_size'], self.config['patch_size_t']
+ post_patch_num_frames = num_frames // p_t
+ post_patch_height = height // p
+ post_patch_width = width // p
+ original_context_length = post_patch_num_frames * post_patch_height * post_patch_width
+
+ hidden_states, rope_freqs = self.process_input_hidden_states(hidden_states, latent_indices, clean_latents, clean_latent_indices, clean_latents_2x, clean_latent_2x_indices, clean_latents_4x, clean_latent_4x_indices)
+
+ temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections)
+ encoder_hidden_states = self.gradient_checkpointing_method(self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask)
+
+ if self.image_projection is not None:
+ assert image_embeddings is not None, 'You must use image embeddings!'
+ extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings)
+ extra_attention_mask = torch.ones((batch_size, extra_encoder_hidden_states.shape[1]), dtype=encoder_attention_mask.dtype, device=encoder_attention_mask.device)
+
+ # must cat before (not after) encoder_hidden_states, due to attn masking
+ encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1)
+ encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1)
+
+ if batch_size == 1:
+ # When batch size is 1, no attention masks or var-len attention functions are needed,
+ # since cropping the padded text tokens is mathematically equivalent to masking them out.
+ text_len = encoder_attention_mask.sum().item()
+ encoder_hidden_states = encoder_hidden_states[:, :text_len]
+ attention_mask = None, None, None, None
+ else:
+ img_seq_len = hidden_states.shape[1]
+ txt_seq_len = encoder_hidden_states.shape[1]
+
+ cu_seqlens_q = get_cu_seqlens(encoder_attention_mask, img_seq_len)
+ cu_seqlens_kv = cu_seqlens_q
+ max_seqlen_q = img_seq_len + txt_seq_len
+ max_seqlen_kv = max_seqlen_q
+
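+ # Pack cumulative sequence lengths and maximum lengths for the var-len attention kernels used inside the blocks.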
+ attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv
+
+ if self.enable_teacache:
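+ # TeaCache: accumulate the (rescaled) relative change of the modulated input across steps;
+ # if it stays below the threshold, skip the transformer blocks and reuse the cached residual.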
+ modulated_inp = self.transformer_blocks[0].norm1(hidden_states, emb=temb)[0]
+
+ if self.cnt == 0 or self.cnt == self.num_steps-1:
+ should_calc = True
+ self.accumulated_rel_l1_distance = 0
+ else:
+ curr_rel_l1 = ((modulated_inp - self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean()).cpu().item()
+ self.accumulated_rel_l1_distance += self.teacache_rescale_func(curr_rel_l1)
+ should_calc = self.accumulated_rel_l1_distance >= self.rel_l1_thresh
+
+ if should_calc:
+ self.accumulated_rel_l1_distance = 0
+
+ self.previous_modulated_input = modulated_inp
+ self.cnt += 1
+
+ if self.cnt == self.num_steps:
+ self.cnt = 0
+
+ if not should_calc:
+ hidden_states = hidden_states + self.previous_residual
+ else:
+ ori_hidden_states = hidden_states.clone()
+
+ for block_id, block in enumerate(self.transformer_blocks):
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
+ block,
+ hidden_states,
+ encoder_hidden_states,
+ temb,
+ attention_mask,
+ rope_freqs
+ )
+
+ for block_id, block in enumerate(self.single_transformer_blocks):
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
+ block,
+ hidden_states,
+ encoder_hidden_states,
+ temb,
+ attention_mask,
+ rope_freqs
+ )
+
+ self.previous_residual = hidden_states - ori_hidden_states
+ else:
+ for block_id, block in enumerate(self.transformer_blocks):
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
+ block,
+ hidden_states,
+ encoder_hidden_states,
+ temb,
+ attention_mask,
+ rope_freqs
+ )
+
+ for block_id, block in enumerate(self.single_transformer_blocks):
+ hidden_states, encoder_hidden_states = self.gradient_checkpointing_method(
+ block,
+ hidden_states,
+ encoder_hidden_states,
+ temb,
+ attention_mask,
+ rope_freqs
+ )
+
+ hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb)
+
+ hidden_states = hidden_states[:, -original_context_length:, :]
+
+ if self.high_quality_fp32_output_for_inference:
+ hidden_states = hidden_states.to(dtype=torch.float32)
+ if self.proj_out.weight.dtype != torch.float32:
+ self.proj_out.to(dtype=torch.float32)
+
+ hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states)
+
+ hidden_states = einops.rearrange(hidden_states, 'b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)',
+ t=post_patch_num_frames, h=post_patch_height, w=post_patch_width,
+ pt=p_t, ph=p, pw=p)
+
+ if return_dict:
+ return Transformer2DModelOutput(sample=hidden_states)
+
+ return hidden_states,
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/pipelines/k_diffusion_hunyuan.py b/exp_code/1_benchmark/FramePack/diffusers_helper/pipelines/k_diffusion_hunyuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..d72b44b859c0042af1e227612edd76fa85880548
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/pipelines/k_diffusion_hunyuan.py
@@ -0,0 +1,120 @@
+import torch
+import math
+
+from diffusers_helper.k_diffusion.uni_pc_fm import sample_unipc
+from diffusers_helper.k_diffusion.wrapper import fm_wrapper
+from diffusers_helper.utils import repeat_to_batch_size
+
+
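+# Flux-style time shift: warps t in (0, 1] so that larger mu pushes the schedule toward higher sigmas (t=1 maps to 1, t->0 maps to 0).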
+def flux_time_shift(t, mu=1.15, sigma=1.0):
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
+
+
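+# Linearly interpolate mu between the reference points (x1, y1) and (x2, y2) based on the context length, capped so that exp(mu) never exceeds exp_max.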
+def calculate_flux_mu(context_length, x1=256, y1=0.5, x2=4096, y2=1.15, exp_max=7.0):
+ k = (y2 - y1) / (x2 - x1)
+ b = y1 - k * x1
+ mu = k * context_length + b
+ mu = min(mu, math.log(exp_max))
+ return mu
+
+
+def get_flux_sigmas_from_mu(n, mu):
+ sigmas = torch.linspace(1, 0, steps=n + 1)
+ sigmas = flux_time_shift(sigmas, mu=mu)
+ return sigmas
+
+
+@torch.inference_mode()
+def sample_hunyuan(
+ transformer,
+ sampler='unipc',
+ initial_latent=None,
+ concat_latent=None,
+ strength=1.0,
+ width=512,
+ height=512,
+ frames=16,
+ real_guidance_scale=1.0,
+ distilled_guidance_scale=6.0,
+ guidance_rescale=0.0,
+ shift=None,
+ num_inference_steps=25,
+ batch_size=None,
+ generator=None,
+ prompt_embeds=None,
+ prompt_embeds_mask=None,
+ prompt_poolers=None,
+ negative_prompt_embeds=None,
+ negative_prompt_embeds_mask=None,
+ negative_prompt_poolers=None,
+ dtype=torch.bfloat16,
+ device=None,
+ negative_kwargs=None,
+ callback=None,
+ **kwargs,
+):
+ device = device or transformer.device
+
+ if batch_size is None:
+ batch_size = int(prompt_embeds.shape[0])
+
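+ # Initial noise in the packed HunyuanVideo latent layout: 16 channels, (frames + 3) // 4 temporal tokens, spatial size downscaled by 8.
+ # The noise is drawn on the generator's device, so a torch.Generator is expected to be provided.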
+ latents = torch.randn((batch_size, 16, (frames + 3) // 4, height // 8, width // 8), generator=generator, device=generator.device).to(device=device, dtype=torch.float32)
+
+ B, C, T, H, W = latents.shape
+ seq_length = T * H * W // 4
+
+ if shift is None:
+ mu = calculate_flux_mu(seq_length, exp_max=7.0)
+ else:
+ mu = math.log(shift)
+
+ sigmas = get_flux_sigmas_from_mu(num_inference_steps, mu).to(device)
+
+ k_model = fm_wrapper(transformer)
+
+ if initial_latent is not None:
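+ # Image-conditioned start: shrink the schedule by strength and blend the initial latent with noise at the first sigma.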
+ sigmas = sigmas * strength
+ first_sigma = sigmas[0].to(device=device, dtype=torch.float32)
+ initial_latent = initial_latent.to(device=device, dtype=torch.float32)
+ latents = initial_latent.float() * (1.0 - first_sigma) + latents.float() * first_sigma
+
+ if concat_latent is not None:
+ concat_latent = concat_latent.to(latents)
+
+ distilled_guidance = torch.tensor([distilled_guidance_scale * 1000.0] * batch_size).to(device=device, dtype=dtype)
+
+ prompt_embeds = repeat_to_batch_size(prompt_embeds, batch_size)
+ prompt_embeds_mask = repeat_to_batch_size(prompt_embeds_mask, batch_size)
+ prompt_poolers = repeat_to_batch_size(prompt_poolers, batch_size)
+ negative_prompt_embeds = repeat_to_batch_size(negative_prompt_embeds, batch_size)
+ negative_prompt_embeds_mask = repeat_to_batch_size(negative_prompt_embeds_mask, batch_size)
+ negative_prompt_poolers = repeat_to_batch_size(negative_prompt_poolers, batch_size)
+ concat_latent = repeat_to_batch_size(concat_latent, batch_size)
+
+ sampler_kwargs = dict(
+ dtype=dtype,
+ cfg_scale=real_guidance_scale,
+ cfg_rescale=guidance_rescale,
+ concat_latent=concat_latent,
+ positive=dict(
+ pooled_projections=prompt_poolers,
+ encoder_hidden_states=prompt_embeds,
+ encoder_attention_mask=prompt_embeds_mask,
+ guidance=distilled_guidance,
+ **kwargs,
+ ),
+ negative=dict(
+ pooled_projections=negative_prompt_poolers,
+ encoder_hidden_states=negative_prompt_embeds,
+ encoder_attention_mask=negative_prompt_embeds_mask,
+ guidance=distilled_guidance,
+ **(kwargs if negative_kwargs is None else {**kwargs, **negative_kwargs}),
+ )
+ )
+
+ if sampler == 'unipc':
+ results = sample_unipc(k_model, latents, sigmas, extra_args=sampler_kwargs, disable=False, callback=callback)
+ else:
+ raise NotImplementedError(f'Sampler {sampler} is not supported.')
+
+ return results
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/thread_utils.py b/exp_code/1_benchmark/FramePack/diffusers_helper/thread_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..144fdad6a218b10e77944e927ea350bb84b559a1
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/thread_utils.py
@@ -0,0 +1,76 @@
+import time
+
+from threading import Thread, Lock
+
+
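+# Lazily started background worker: tasks are queued as (func, args, kwargs) tuples and run one at a time on a single daemon thread.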
+class Listener:
+ task_queue = []
+ lock = Lock()
+ thread = None
+
+ @classmethod
+ def _process_tasks(cls):
+ while True:
+ task = None
+ with cls.lock:
+ if cls.task_queue:
+ task = cls.task_queue.pop(0)
+
+ if task is None:
+ time.sleep(0.001)
+ continue
+
+ func, args, kwargs = task
+ try:
+ func(*args, **kwargs)
+ except Exception as e:
+ print(f"Error in listener thread: {e}")
+
+ @classmethod
+ def add_task(cls, func, *args, **kwargs):
+ with cls.lock:
+ cls.task_queue.append((func, args, kwargs))
+
+ if cls.thread is None:
+ cls.thread = Thread(target=cls._process_tasks, daemon=True)
+ cls.thread.start()
+
+
+def async_run(func, *args, **kwargs):
+ Listener.add_task(func, *args, **kwargs)
+
+
+class FIFOQueue:
+ def __init__(self):
+ self.queue = []
+ self.lock = Lock()
+
+ def push(self, item):
+ with self.lock:
+ self.queue.append(item)
+
+ def pop(self):
+ with self.lock:
+ if self.queue:
+ return self.queue.pop(0)
+ return None
+
+ def top(self):
+ with self.lock:
+ if self.queue:
+ return self.queue[0]
+ return None
+
+ def next(self):
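+ # Blocking pop: spin with a short sleep until an item becomes available.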
+ while True:
+ with self.lock:
+ if self.queue:
+ return self.queue.pop(0)
+
+ time.sleep(0.001)
+
+
+class AsyncStream:
+ def __init__(self):
+ self.input_queue = FIFOQueue()
+ self.output_queue = FIFOQueue()
diff --git a/exp_code/1_benchmark/FramePack/diffusers_helper/utils.py b/exp_code/1_benchmark/FramePack/diffusers_helper/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd7a0c5f8f04960476e893321c52318ea079e14
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/diffusers_helper/utils.py
@@ -0,0 +1,613 @@
+import os
+import cv2
+import json
+import random
+import glob
+import torch
+import einops
+import numpy as np
+import datetime
+import torchvision
+
+import safetensors.torch as sf
+from PIL import Image
+
+
+def min_resize(x, m):
+ if x.shape[0] < x.shape[1]:
+ s0 = m
+ s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1]))
+ else:
+ s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0]))
+ s1 = m
+ new_max = max(s1, s0)
+ raw_max = max(x.shape[0], x.shape[1])
+ if new_max < raw_max:
+ interpolation = cv2.INTER_AREA
+ else:
+ interpolation = cv2.INTER_LANCZOS4
+ y = cv2.resize(x, (s1, s0), interpolation=interpolation)
+ return y
+
+
+def d_resize(x, y):
+ H, W, C = y.shape
+ new_min = min(H, W)
+ raw_min = min(x.shape[0], x.shape[1])
+ if new_min < raw_min:
+ interpolation = cv2.INTER_AREA
+ else:
+ interpolation = cv2.INTER_LANCZOS4
+ y = cv2.resize(x, (W, H), interpolation=interpolation)
+ return y
+
+
+def resize_and_center_crop(image, target_width, target_height):
+ if target_height == image.shape[0] and target_width == image.shape[1]:
+ return image
+
+ pil_image = Image.fromarray(image)
+ original_width, original_height = pil_image.size
+ scale_factor = max(target_width / original_width, target_height / original_height)
+ resized_width = int(round(original_width * scale_factor))
+ resized_height = int(round(original_height * scale_factor))
+ resized_image = pil_image.resize((resized_width, resized_height), Image.LANCZOS)
+ left = (resized_width - target_width) / 2
+ top = (resized_height - target_height) / 2
+ right = (resized_width + target_width) / 2
+ bottom = (resized_height + target_height) / 2
+ cropped_image = resized_image.crop((left, top, right, bottom))
+ return np.array(cropped_image)
+
+
+def resize_and_center_crop_pytorch(image, target_width, target_height):
+ B, C, H, W = image.shape
+
+ if H == target_height and W == target_width:
+ return image
+
+ scale_factor = max(target_width / W, target_height / H)
+ resized_width = int(round(W * scale_factor))
+ resized_height = int(round(H * scale_factor))
+
+ resized = torch.nn.functional.interpolate(image, size=(resized_height, resized_width), mode='bilinear', align_corners=False)
+
+ top = (resized_height - target_height) // 2
+ left = (resized_width - target_width) // 2
+ cropped = resized[:, :, top:top + target_height, left:left + target_width]
+
+ return cropped
+
+
+def resize_without_crop(image, target_width, target_height):
+ if target_height == image.shape[0] and target_width == image.shape[1]:
+ return image
+
+ pil_image = Image.fromarray(image)
+ resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS)
+ return np.array(resized_image)
+
+
+def just_crop(image, w, h):
+ if h == image.shape[0] and w == image.shape[1]:
+ return image
+
+ original_height, original_width = image.shape[:2]
+ k = min(original_height / h, original_width / w)
+ new_width = int(round(w * k))
+ new_height = int(round(h * k))
+ x_start = (original_width - new_width) // 2
+ y_start = (original_height - new_height) // 2
+ cropped_image = image[y_start:y_start + new_height, x_start:x_start + new_width]
+ return cropped_image
+
+
+def write_to_json(data, file_path):
+ temp_file_path = file_path + ".tmp"
+ with open(temp_file_path, 'wt', encoding='utf-8') as temp_file:
+ json.dump(data, temp_file, indent=4)
+ os.replace(temp_file_path, file_path)
+ return
+
+
+def read_from_json(file_path):
+ with open(file_path, 'rt', encoding='utf-8') as file:
+ data = json.load(file)
+ return data
+
+
+def get_active_parameters(m):
+ return {k: v for k, v in m.named_parameters() if v.requires_grad}
+
+
+def cast_training_params(m, dtype=torch.float32):
+ result = {}
+ for n, param in m.named_parameters():
+ if param.requires_grad:
+ param.data = param.to(dtype)
+ result[n] = param
+ return result
+
+
+def separate_lora_AB(parameters, B_patterns=None):
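+ # Split parameters into LoRA-B-style entries (keys matching any of B_patterns) and everything else.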
+ parameters_normal = {}
+ parameters_B = {}
+
+ if B_patterns is None:
+ B_patterns = ['.lora_B.', '__zero__']
+
+ for k, v in parameters.items():
+ if any(B_pattern in k for B_pattern in B_patterns):
+ parameters_B[k] = v
+ else:
+ parameters_normal[k] = v
+
+ return parameters_normal, parameters_B
+
+
+def set_attr_recursive(obj, attr, value):
+ attrs = attr.split(".")
+ for name in attrs[:-1]:
+ obj = getattr(obj, name)
+ setattr(obj, attrs[-1], value)
+ return
+
+
+def print_tensor_list_size(tensors):
+ total_size = 0
+ total_elements = 0
+
+ if isinstance(tensors, dict):
+ tensors = tensors.values()
+
+ for tensor in tensors:
+ total_size += tensor.nelement() * tensor.element_size()
+ total_elements += tensor.nelement()
+
+ total_size_MB = total_size / (1024 ** 2)
+ total_elements_B = total_elements / 1e9
+
+ print(f"Total number of tensors: {len(tensors)}")
+ print(f"Total size of tensors: {total_size_MB:.2f} MB")
+ print(f"Total number of parameters: {total_elements_B:.3f} billion")
+ return
+
+
+@torch.no_grad()
+def batch_mixture(a, b=None, probability_a=0.5, mask_a=None):
+ batch_size = a.size(0)
+
+ if b is None:
+ b = torch.zeros_like(a)
+
+ if mask_a is None:
+ mask_a = torch.rand(batch_size) < probability_a
+
+ mask_a = mask_a.to(a.device)
+ mask_a = mask_a.reshape((batch_size,) + (1,) * (a.dim() - 1))
+ result = torch.where(mask_a, a, b)
+ return result
+
+
+@torch.no_grad()
+def zero_module(module):
+ for p in module.parameters():
+ p.detach().zero_()
+ return module
+
+
+@torch.no_grad()
+def supress_lower_channels(m, k, alpha=0.01):
+ data = m.weight.data.clone()
+
+ assert int(data.shape[1]) >= k
+
+ data[:, :k] = data[:, :k] * alpha
+ m.weight.data = data.contiguous().clone()
+ return m
+
+
+def freeze_module(m):
+ if not hasattr(m, '_forward_inside_frozen_module'):
+ m._forward_inside_frozen_module = m.forward
+ m.requires_grad_(False)
+ m.forward = torch.no_grad()(m.forward)
+ return m
+
+
+def get_latest_safetensors(folder_path):
+ safetensors_files = glob.glob(os.path.join(folder_path, '*.safetensors'))
+
+ if not safetensors_files:
+ raise ValueError('No file to resume!')
+
+ latest_file = max(safetensors_files, key=os.path.getmtime)
+ latest_file = os.path.abspath(os.path.realpath(latest_file))
+ return latest_file
+
+
+def generate_random_prompt_from_tags(tags_str, min_length=3, max_length=32):
+ tags = tags_str.split(', ')
+ tags = random.sample(tags, k=min(random.randint(min_length, max_length), len(tags)))
+ prompt = ', '.join(tags)
+ return prompt
+
+
+def interpolate_numbers(a, b, n, round_to_int=False, gamma=1.0):
+ numbers = a + (b - a) * (np.linspace(0, 1, n) ** gamma)
+ if round_to_int:
+ numbers = np.round(numbers).astype(int)
+ return numbers.tolist()
+
+
+def uniform_random_by_intervals(inclusive, exclusive, n, round_to_int=False):
+ edges = np.linspace(0, 1, n + 1)
+ points = np.random.uniform(edges[:-1], edges[1:])
+ numbers = inclusive + (exclusive - inclusive) * points
+ if round_to_int:
+ numbers = np.round(numbers).astype(int)
+ return numbers.tolist()
+
+
+def soft_append_bcthw(history, current, overlap=0):
+ if overlap <= 0:
+ return torch.cat([history, current], dim=2)
+
+ assert history.shape[2] >= overlap, f"History length ({history.shape[2]}) must be >= overlap ({overlap})"
+ assert current.shape[2] >= overlap, f"Current length ({current.shape[2]}) must be >= overlap ({overlap})"
+
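+ # Linearly cross-fade the overlapping frames from history (weight 1 -> 0) into current (weight 0 -> 1) before concatenating along the time axis.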
+ weights = torch.linspace(1, 0, overlap, dtype=history.dtype, device=history.device).view(1, 1, -1, 1, 1)
+ blended = weights * history[:, :, -overlap:] + (1 - weights) * current[:, :, :overlap]
+ output = torch.cat([history[:, :, :-overlap], blended, current[:, :, overlap:]], dim=2)
+
+ return output.to(history)
+
+
+def save_bcthw_as_mp4(x, output_filename, fps=10, crf=0):
+ b, c, t, h, w = x.shape
+
+ per_row = b
+ for p in [6, 5, 4, 3, 2]:
+ if b % p == 0:
+ per_row = p
+ break
+
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
+ x = x.detach().cpu().to(torch.uint8)
+ x = einops.rearrange(x, '(m n) c t h w -> t (m h) (n w) c', n=per_row)
+ torchvision.io.write_video(output_filename, x, fps=fps, video_codec='libx264', options={'crf': str(int(crf))})
+ return x
+
+
+def save_bcthw_as_png(x, output_filename):
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
+ x = x.detach().cpu().to(torch.uint8)
+ x = einops.rearrange(x, 'b c t h w -> c (b h) (t w)')
+ torchvision.io.write_png(x, output_filename)
+ return output_filename
+
+
+def save_bchw_as_png(x, output_filename):
+ os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True)
+ x = torch.clamp(x.float(), -1., 1.) * 127.5 + 127.5
+ x = x.detach().cpu().to(torch.uint8)
+ x = einops.rearrange(x, 'b c h w -> c h (b w)')
+ torchvision.io.write_png(x, output_filename)
+ return output_filename
+
+
+def add_tensors_with_padding(tensor1, tensor2):
+ if tensor1.shape == tensor2.shape:
+ return tensor1 + tensor2
+
+ shape1 = tensor1.shape
+ shape2 = tensor2.shape
+
+ new_shape = tuple(max(s1, s2) for s1, s2 in zip(shape1, shape2))
+
+ padded_tensor1 = torch.zeros(new_shape)
+ padded_tensor2 = torch.zeros(new_shape)
+
+ padded_tensor1[tuple(slice(0, s) for s in shape1)] = tensor1
+ padded_tensor2[tuple(slice(0, s) for s in shape2)] = tensor2
+
+ result = padded_tensor1 + padded_tensor2
+ return result
+
+
+def print_free_mem():
+ torch.cuda.empty_cache()
+ free_mem, total_mem = torch.cuda.mem_get_info(0)
+ free_mem_mb = free_mem / (1024 ** 2)
+ total_mem_mb = total_mem / (1024 ** 2)
+ print(f"Free memory: {free_mem_mb:.2f} MB")
+ print(f"Total memory: {total_mem_mb:.2f} MB")
+ return
+
+
+def print_gpu_parameters(device, state_dict, log_count=1):
+ summary = {"device": device, "keys_count": len(state_dict)}
+
+ logged_params = {}
+ for i, (key, tensor) in enumerate(state_dict.items()):
+ if i >= log_count:
+ break
+ logged_params[key] = tensor.flatten()[:3].tolist()
+
+ summary["params"] = logged_params
+
+ print(str(summary))
+ return
+
+
+def visualize_txt_as_img(width, height, text, font_path='font/DejaVuSans.ttf', size=18):
+ from PIL import Image, ImageDraw, ImageFont
+
+ txt = Image.new("RGB", (width, height), color="white")
+ draw = ImageDraw.Draw(txt)
+ font = ImageFont.truetype(font_path, size=size)
+
+ if text == '':
+ return np.array(txt)
+
+ # Split text into lines that fit within the image width
+ lines = []
+ words = text.split()
+ current_line = words[0]
+
+ for word in words[1:]:
+ line_with_word = f"{current_line} {word}"
+ if draw.textbbox((0, 0), line_with_word, font=font)[2] <= width:
+ current_line = line_with_word
+ else:
+ lines.append(current_line)
+ current_line = word
+
+ lines.append(current_line)
+
+ # Draw the text line by line
+ y = 0
+ line_height = draw.textbbox((0, 0), "A", font=font)[3]
+
+ for line in lines:
+ if y + line_height > height:
+ break # stop drawing if the next line will be outside the image
+ draw.text((0, y), line, fill="black", font=font)
+ y += line_height
+
+ return np.array(txt)
+
+
+def blue_mark(x):
+ x = x.copy()
+ c = x[:, :, 2]
+ b = cv2.blur(c, (9, 9))
+ x[:, :, 2] = ((c - b) * 16.0 + b).clip(-1, 1)
+ return x
+
+
+def green_mark(x):
+ x = x.copy()
+ x[:, :, 2] = -1
+ x[:, :, 0] = -1
+ return x
+
+
+def frame_mark(x):
+ x = x.copy()
+ x[:64] = -1
+ x[-64:] = -1
+ x[:, :8] = 1
+ x[:, -8:] = 1
+ return x
+
+
+@torch.inference_mode()
+def pytorch2numpy(imgs):
+ results = []
+ for x in imgs:
+ y = x.movedim(0, -1)
+ y = y * 127.5 + 127.5
+ y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
+ results.append(y)
+ return results
+
+
+@torch.inference_mode()
+def numpy2pytorch(imgs):
+ h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.5 - 1.0
+ h = h.movedim(-1, 1)
+ return h
+
+
+@torch.no_grad()
+def duplicate_prefix_to_suffix(x, count, zero_out=False):
+ if zero_out:
+ return torch.cat([x, torch.zeros_like(x[:count])], dim=0)
+ else:
+ return torch.cat([x, x[:count]], dim=0)
+
+
+def weighted_mse(a, b, weight):
+ return torch.mean(weight.float() * (a.float() - b.float()) ** 2)
+
+
+def clamped_linear_interpolation(x, x_min, y_min, x_max, y_max, sigma=1.0):
+ x = (x - x_min) / (x_max - x_min)
+ x = max(0.0, min(x, 1.0))
+ x = x ** sigma
+ return y_min + x * (y_max - y_min)
+
+
+def expand_to_dims(x, target_dims):
+ return x.view(*x.shape, *([1] * max(0, target_dims - x.dim())))
+
+
+def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int):
+ if tensor is None:
+ return None
+
+ first_dim = tensor.shape[0]
+
+ if first_dim == batch_size:
+ return tensor
+
+ if batch_size % first_dim != 0:
+ raise ValueError(f"Cannot evenly repeat first dim {first_dim} to match batch_size {batch_size}.")
+
+ repeat_times = batch_size // first_dim
+
+ return tensor.repeat(repeat_times, *[1] * (tensor.dim() - 1))
+
+
+def dim5(x):
+ return expand_to_dims(x, 5)
+
+
+def dim4(x):
+ return expand_to_dims(x, 4)
+
+
+def dim3(x):
+ return expand_to_dims(x, 3)
+
+
+def crop_or_pad_yield_mask(x, length):
+ B, F, C = x.shape
+ device = x.device
+ dtype = x.dtype
+
+ if F < length:
+ y = torch.zeros((B, length, C), dtype=dtype, device=device)
+ mask = torch.zeros((B, length), dtype=torch.bool, device=device)
+ y[:, :F, :] = x
+ mask[:, :F] = True
+ return y, mask
+
+ return x[:, :length, :], torch.ones((B, length), dtype=torch.bool, device=device)
+
+
+def extend_dim(x, dim, minimal_length, zero_pad=False):
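+ # Pad dimension dim up to minimal_length, either with zeros or by repeating the last slice along that dimension.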
+ original_length = int(x.shape[dim])
+
+ if original_length >= minimal_length:
+ return x
+
+ if zero_pad:
+ padding_shape = list(x.shape)
+ padding_shape[dim] = minimal_length - original_length
+ padding = torch.zeros(padding_shape, dtype=x.dtype, device=x.device)
+ else:
+ idx = (slice(None),) * dim + (slice(-1, None),) + (slice(None),) * (len(x.shape) - dim - 1)
+ last_element = x[idx]
+ padding = last_element.repeat_interleave(minimal_length - original_length, dim=dim)
+
+ return torch.cat([x, padding], dim=dim)
+
+
+def lazy_positional_encoding(t, repeats=None):
+ if not isinstance(t, list):
+ t = [t]
+
+ from diffusers.models.embeddings import get_timestep_embedding
+
+ te = torch.tensor(t)
+ te = get_timestep_embedding(timesteps=te, embedding_dim=256, flip_sin_to_cos=True, downscale_freq_shift=0.0, scale=1.0)
+
+ if repeats is None:
+ return te
+
+ te = te[:, None, :].expand(-1, repeats, -1)
+
+ return te
+
+
+def state_dict_offset_merge(A, B, C=None):
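+ # Returns A + B when C is None, otherwise A + (B - C), i.e. applies B's offset relative to C on top of A.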
+ result = {}
+ keys = A.keys()
+
+ for key in keys:
+ A_value = A[key]
+ B_value = B[key].to(A_value)
+
+ if C is None:
+ result[key] = A_value + B_value
+ else:
+ C_value = C[key].to(A_value)
+ result[key] = A_value + B_value - C_value
+
+ return result
+
+
+def state_dict_weighted_merge(state_dicts, weights):
+ if len(state_dicts) != len(weights):
+ raise ValueError("Number of state dictionaries must match number of weights")
+
+ if not state_dicts:
+ return {}
+
+ total_weight = sum(weights)
+
+ if total_weight == 0:
+ raise ValueError("Sum of weights cannot be zero")
+
+ normalized_weights = [w / total_weight for w in weights]
+
+ keys = state_dicts[0].keys()
+ result = {}
+
+ for key in keys:
+ result[key] = state_dicts[0][key] * normalized_weights[0]
+
+ for i in range(1, len(state_dicts)):
+ state_dict_value = state_dicts[i][key].to(result[key])
+ result[key] += state_dict_value * normalized_weights[i]
+
+ return result
+
+
+def group_files_by_folder(all_files):
+ grouped_files = {}
+
+ for file in all_files:
+ folder_name = os.path.basename(os.path.dirname(file))
+ if folder_name not in grouped_files:
+ grouped_files[folder_name] = []
+ grouped_files[folder_name].append(file)
+
+ list_of_lists = list(grouped_files.values())
+ return list_of_lists
+
+
+def generate_timestamp():
+ now = datetime.datetime.now()
+ timestamp = now.strftime('%y%m%d_%H%M%S')
+ milliseconds = f"{int(now.microsecond / 1000):03d}"
+ random_number = random.randint(0, 9999)
+ return f"{timestamp}_{milliseconds}_{random_number}"
+
+
+def write_PIL_image_with_png_info(image, metadata, path):
+ from PIL.PngImagePlugin import PngInfo
+
+ png_info = PngInfo()
+ for key, value in metadata.items():
+ png_info.add_text(key, value)
+
+ image.save(path, "PNG", pnginfo=png_info)
+ return image
+
+
+def torch_safe_save(content, path):
+ torch.save(content, path + '_tmp')
+ os.replace(path + '_tmp', path)
+ return path
+
+
+def move_optimizer_to_device(optimizer, device):
+ for state in optimizer.state.values():
+ for k, v in state.items():
+ if isinstance(v, torch.Tensor):
+ state[k] = v.to(device)
diff --git a/exp_code/1_benchmark/FramePack/requirements.txt b/exp_code/1_benchmark/FramePack/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf5ef61a7f0e8f49dd5afd8908e123f5f831166a
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack/requirements.txt
@@ -0,0 +1,24 @@
+accelerate
+transformers
+git+https://github.com/huggingface/diffusers
+# accelerate==1.6.0
+# transformers==4.46.2
+# diffusers==0.33.1
+gradio==5.23.0
+sentencepiece==0.2.0
+pillow==11.1.0
+av==12.1.0
+numpy==1.26.2
+scipy==1.12.0
+requests==2.31.0
+torchsde==0.2.6
+
+einops
+opencv-contrib-python
+safetensors
+
+packaging
+ninja
+sageattention==1.0.6
+nvitop
+modelscope
diff --git a/exp_code/1_benchmark/FramePack_diffusers/infer_f1.py b/exp_code/1_benchmark/FramePack_diffusers/infer_f1.py
new file mode 100644
index 0000000000000000000000000000000000000000..be52172e35fa62b202621924898654c55ca49a6e
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack_diffusers/infer_f1.py
@@ -0,0 +1,47 @@
+import torch
+from diffusers import HunyuanVideoFramepackPipeline, HunyuanVideoFramepackTransformer3DModel
+from diffusers.hooks import apply_group_offloading
+from diffusers.utils import export_to_video, load_image
+from transformers import SiglipImageProcessor, SiglipVisionModel
+
+transformer = HunyuanVideoFramepackTransformer3DModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/FramePack_F1_I2V_HY_20250503", torch_dtype=torch.bfloat16
+)
+feature_extractor = SiglipImageProcessor.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="feature_extractor"
+)
+image_encoder = SiglipVisionModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="image_encoder", torch_dtype=torch.float16
+)
+pipe = HunyuanVideoFramepackPipeline.from_pretrained(
+ "/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo",
+ transformer=transformer,
+ feature_extractor=feature_extractor,
+ image_encoder=image_encoder,
+ torch_dtype=torch.float16,
+)
+
+onload_device = torch.device("cuda")
+offload_device = torch.device("cpu")
+list(map(
+ lambda x: apply_group_offloading(x, onload_device, offload_device, offload_type="leaf_level", use_stream=True, low_cpu_mem_usage=True),
+ [pipe.text_encoder, pipe.text_encoder_2, pipe.transformer]
+))
+pipe.image_encoder.to(onload_device)
+pipe.vae.to(onload_device)
+pipe.vae.enable_tiling()
+
+image = load_image("penguin.png")
+output = pipe(
+ image=image,
+ prompt="A penguin dancing in the snow",
+ height=832,
+ width=480,
+ num_frames=91,
+ num_inference_steps=30,
+ guidance_scale=9.0,
+ generator=torch.Generator().manual_seed(0),
+ sampling_type="vanilla",
+).frames[0]
+print(f"Max memory: {torch.cuda.max_memory_allocated() / 1024**3:.3f} GB")
+export_to_video(output, "output.mp4", fps=30)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/FramePack_diffusers/infer_flf2v.py b/exp_code/1_benchmark/FramePack_diffusers/infer_flf2v.py
new file mode 100644
index 0000000000000000000000000000000000000000..8812355688d1f8b0d95c0af763287cf25e461851
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack_diffusers/infer_flf2v.py
@@ -0,0 +1,42 @@
+import torch
+from diffusers import HunyuanVideoFramepackPipeline, HunyuanVideoFramepackTransformer3DModel
+from diffusers.utils import export_to_video, load_image
+from transformers import SiglipImageProcessor, SiglipVisionModel
+
+transformer = HunyuanVideoFramepackTransformer3DModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/FramePackI2V_HY", torch_dtype=torch.bfloat16
+)
+feature_extractor = SiglipImageProcessor.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="feature_extractor"
+)
+image_encoder = SiglipVisionModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="image_encoder", torch_dtype=torch.float16
+)
+pipe = HunyuanVideoFramepackPipeline.from_pretrained(
+ "/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo",
+ transformer=transformer,
+ feature_extractor=feature_extractor,
+ image_encoder=image_encoder,
+ torch_dtype=torch.float16,
+)
+pipe.to("cuda")
+
+prompt = "CG animation style, a small blue bird takes off from the ground, flapping its wings. The bird's feathers are delicate, with a unique pattern on its chest. The background shows a blue sky with white clouds under bright sunshine. The camera follows the bird upward, capturing its flight and the vastness of the sky from a close-up, low-angle perspective."
+first_image = load_image(
+ "flf2v_input_first_frame.png"
+)
+last_image = load_image(
+ "flf2v_input_last_frame.png"
+)
+output = pipe(
+ image=first_image,
+ last_image=last_image,
+ prompt=prompt,
+ height=512,
+ width=512,
+ num_frames=91,
+ num_inference_steps=30,
+ guidance_scale=9.0,
+ generator=torch.Generator().manual_seed(0),
+).frames[0]
+export_to_video(output, "output.mp4", fps=30)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/FramePack_diffusers/infer_i2v.py b/exp_code/1_benchmark/FramePack_diffusers/infer_i2v.py
new file mode 100644
index 0000000000000000000000000000000000000000..84e747647c4cd678aa95790aece147b030d368d5
--- /dev/null
+++ b/exp_code/1_benchmark/FramePack_diffusers/infer_i2v.py
@@ -0,0 +1,36 @@
+import torch
+from diffusers import HunyuanVideoFramepackPipeline, HunyuanVideoFramepackTransformer3DModel
+from diffusers.utils import export_to_video, load_image
+from transformers import SiglipImageProcessor, SiglipVisionModel
+
+transformer = HunyuanVideoFramepackTransformer3DModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/FramePackI2V_HY", torch_dtype=torch.bfloat16
+)
+feature_extractor = SiglipImageProcessor.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="feature_extractor"
+)
+image_encoder = SiglipVisionModel.from_pretrained(
+ "/mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl", subfolder="image_encoder", torch_dtype=torch.float16
+)
+pipe = HunyuanVideoFramepackPipeline.from_pretrained(
+ "/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo",
+ transformer=transformer,
+ feature_extractor=feature_extractor,
+ image_encoder=image_encoder,
+ torch_dtype=torch.float16,
+)
+pipe.vae.enable_tiling()
+pipe.to("cuda")
+
+image = load_image("penguin.png")
+output = pipe(
+ image=image,
+ prompt="A penguin dancing in the snow",
+ height=832,
+ width=480,
+ num_frames=91,
+ num_inference_steps=30,
+ guidance_scale=9.0,
+ generator=torch.Generator().manual_seed(0),
+).frames[0]
+export_to_video(output, "output.mp4", fps=30)
\ No newline at end of file
diff --git a/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/close_issue.yaml b/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/close_issue.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b6e24c506c7ecddbf1218ee34ff17c6439b0c52
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/close_issue.yaml
@@ -0,0 +1,22 @@
+name: Close inactive issues
+on:
+ schedule:
+ - cron: "30 1 * * *"
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v9
+ with:
+ days-before-issue-stale: 7
+ days-before-issue-close: 7
+ stale-issue-label: "stale"
+ stale-issue-message: "This issue is stale because it has been open for 7 days with no activity."
+ close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale."
+ days-before-pr-stale: -1
+ days-before-pr-close: -1
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/github_page.yaml b/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/github_page.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..483c2adc297eb55945cfe3d9fe7829cbb2d3fd07
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/.github/workflows/github_page.yaml
@@ -0,0 +1,30 @@
+name: GitHub Pages
+
+on:
+ workflow_dispatch:
+
+jobs:
+ deploy:
+ runs-on: ubuntu-22.04
+ permissions:
+ contents: write
+ concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: gallery
+
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: 20
+
+ - run: npm install
+ - run: npm run build
+
+ - name: Deploy
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./build
diff --git a/exp_code/1_benchmark/Open-Sora_v12/.pre-commit-config.yaml b/exp_code/1_benchmark/Open-Sora_v12/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bce9cb8adb07419e5d3806d121f8e11854b0ff42
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/.pre-commit-config.yaml
@@ -0,0 +1,31 @@
+repos:
+
+ - repo: https://github.com/PyCQA/autoflake
+ rev: v2.2.1
+ hooks:
+ - id: autoflake
+ name: autoflake (python)
+ args: ['--in-place']
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ name: sort all imports (python)
+
+ - repo: https://github.com/psf/black-pre-commit-mirror
+ rev: 23.9.1
+ hooks:
+ - id: black
+ name: black formatter
+
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: check-yaml
+ - id: check-merge-conflict
+ - id: check-case-conflict
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ args: ['--fix=lf']
diff --git a/exp_code/1_benchmark/Open-Sora_v12/CONTRIBUTING.md b/exp_code/1_benchmark/Open-Sora_v12/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..2acbec41b910e2906e859e8fceee5e48d482fc15
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/CONTRIBUTING.md
@@ -0,0 +1,100 @@
+# Contributing
+
+The Open-Sora project welcomes constructive contributions from the community, and the team is happy to work with you on any problems you encounter to make the project better.
+
+## Development Environment Setup
+
+To contribute to Open-Sora, we would first like to guide you through setting up a proper development environment so that you can implement your changes more easily. Install this library from source with the `editable` flag (`-e`, for development mode) so that changes to the source code are reflected at runtime without re-installation.
+
+You can refer to the [Installation Section](./README.md#installation) and replace `pip install -v .` with `pip install -v -e .`.
+
+### Code Style
+
+We run some static checks when you commit your code changes; please make sure you pass all of them and that your coding style meets our requirements. We use pre-commit hooks to keep the code aligned with our writing standard. To set up the code style checking, follow the steps below.
+
+```shell
+# these commands are executed under the Open-Sora directory
+pip install pre-commit
+pre-commit install
+```
+
+Code format checking will be automatically executed when you commit your changes.
+
+## Contribution Guide
+
+You need to follow the steps below to make a contribution to the main repository via a pull request. You can learn about the details of pull requests [here](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests).
+
+### 1. Fork the Official Repository
+
+First, visit the [Open-Sora repository](https://github.com/hpcaitech/Open-Sora) and fork it into your own account. The `fork` button is at the top-right corner of the web page, alongside buttons such as `watch` and `star`.
+
+Now, you can clone your own forked repository into your local environment.
+
+```shell
+git clone https://github.com/<your-username>/Open-Sora.git
+```
+
+### 2. Configure Git
+
+You need to set the official repository as your upstream so that you can synchronize with the latest updates in the official repository. You can learn about upstreams [here](https://www.atlassian.com/git/tutorials/git-forks-and-upstreams).
+
+Then add the original repository as upstream:
+
+```shell
+cd Open-Sora
+git remote add upstream https://github.com/hpcaitech/Open-Sora.git
+```
+
+You can use the following command to verify that the remote is set. You should see both `origin` and `upstream` in the output.
+
+```shell
+git remote -v
+```
+
+### 3. Synchronize with Official Repository
+
+Before you make changes to the codebase, it is always good to fetch the latest updates in the official repository. In order to do so, you can use the commands below.
+
+```shell
+git fetch upstream
+git checkout main
+git merge upstream/main
+git push origin main
+```
+
+### 4. Create a New Branch
+
+You should not make changes to the `main` branch of your forked repository, as this might make upstream synchronization difficult. Instead, create a new branch with an appropriate name. Branch names should generally start with `hotfix/` or `feature/`: `hotfix` is for bug fixes and `feature` is for adding new features.
+
+```shell
+git checkout -b <new-branch-name>
+```
+
+### 5. Implementation and Code Commit
+
+Now you can implement your code changes in the source code. Remember that you installed the package in development (editable) mode, so you do not need to reinstall it for your changes to take effect; they are picked up by every new Python execution.
+You can commit the changes and push them to your forked repository. Commits should be kept logical, modular, and atomic.
+
+```shell
+git add -A
+git commit -m "<commit-message>"
+git push -u origin <new-branch-name>
+```
+
+### 6. Open a Pull Request
+
+You can now create a pull request on the GitHub webpage of your repository. The source branch is `<new-branch-name>` of your repository, and the target branch should be `main` of `hpcaitech/Open-Sora`. After creating this pull request, you should be able to see it [here](https://github.com/hpcaitech/Open-Sora/pulls).
+
+The Open-Sora team will review your code change and merge your code if applicable.
+
+## FAQ
+
+1. `pylint` cannot recognize some members:
+
+Add this into your `settings.json` in VSCode:
+
+```json
+"pylint.args": [
+ "--generated-members=numpy.* ,torch.*,cv2.*",
+],
+```
diff --git a/exp_code/1_benchmark/Open-Sora_v12/Dockerfile b/exp_code/1_benchmark/Open-Sora_v12/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5740556c1579d7d3d9dbef9a6fece0fcb740851f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/Dockerfile
@@ -0,0 +1,26 @@
+FROM hpcaitech/pytorch-cuda:2.1.0-12.1.0
+
+# metainformation
+LABEL org.opencontainers.image.source = "https://github.com/hpcaitech/Open-Sora"
+LABEL org.opencontainers.image.licenses = "Apache License 2.0"
+LABEL org.opencontainers.image.base.name = "docker.io/library/hpcaitech/pytorch-cuda:2.1.0-12.1.0"
+
+# Set the working directory
+WORKDIR /workspace/Open-Sora
+# Copy the current directory contents into the container at /workspace/Open-Sora
+COPY . .
+
+# install library dependencies
+RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
+
+# install flash attention
+RUN pip install flash-attn --no-build-isolation
+
+# install apex
+RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git
+
+# install xformers
+RUN pip install xformers --index-url https://download.pytorch.org/whl/cu121
+
+# install this project
+RUN pip install -v .
diff --git a/exp_code/1_benchmark/Open-Sora_v12/LICENSE b/exp_code/1_benchmark/Open-Sora_v12/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e7f2aa21cc2208540857beb3abd9d54055d842ca
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/LICENSE
@@ -0,0 +1,680 @@
+Copyright 2024 HPC-AI Technology Inc. All rights reserved.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2024 HPC-AI Technology Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ =========================================================================
+ This project is inspired by the listed projects and is subject to the following licenses:
+
+ 1. Latte (https://github.com/Vchitect/Latte/blob/main/LICENSE)
+
+ Copyright 2024 Latte
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ 2. PixArt-alpha (https://github.com/PixArt-alpha/PixArt-alpha/blob/master/LICENSE)
+
+ Copyright (C) 2024 PixArt-alpha/PixArt-alpha
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ 3. dpm-solver (https://github.com/LuChengTHU/dpm-solver/blob/main/LICENSE)
+
+ MIT License
+
+ Copyright (c) 2022 Cheng Lu
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ 4. DiT (https://github.com/facebookresearch/DiT/blob/main/LICENSE.txt)
+
+ Attribution-NonCommercial 4.0 International
+
+ =======================================================================
+
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
+ does not provide legal services or legal advice. Distribution of
+ Creative Commons public licenses does not create a lawyer-client or
+ other relationship. Creative Commons makes its licenses and related
+ information available on an "as-is" basis. Creative Commons gives no
+ warranties regarding its licenses, any material licensed under their
+ terms and conditions, or any related information. Creative Commons
+ disclaims all liability for damages resulting from their use to the
+ fullest extent possible.
+
+ Using Creative Commons Public Licenses
+
+ Creative Commons public licenses provide a standard set of terms and
+ conditions that creators and other rights holders may use to share
+ original works of authorship and other material subject to copyright
+ and certain other rights specified in the public license below. The
+ following considerations are for informational purposes only, are not
+ exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+ =======================================================================
+
+ Creative Commons Attribution-NonCommercial 4.0 International Public
+ License
+
+ By exercising the Licensed Rights (defined below), You accept and agree
+ to be bound by the terms and conditions of this Creative Commons
+ Attribution-NonCommercial 4.0 International Public License ("Public
+ License"). To the extent this Public License may be interpreted as a
+ contract, You are granted the Licensed Rights in consideration of Your
+ acceptance of these terms and conditions, and the Licensor grants You
+ such rights in consideration of benefits the Licensor receives from
+ making the Licensed Material available under these terms and
+ conditions.
+
+ Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+ d. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ e. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ f. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ g. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ h. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ i. NonCommercial means not primarily intended for or directed towards
+ commercial advantage or monetary compensation. For purposes of
+ this Public License, the exchange of the Licensed Material for
+ other material subject to Copyright and Similar Rights by digital
+ file-sharing or similar means is NonCommercial provided there is
+ no payment of monetary compensation in connection with the
+ exchange.
+
+ j. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ k. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ l. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+ Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part, for NonCommercial purposes only; and
+
+ b. produce, reproduce, and Share Adapted Material for
+ NonCommercial purposes only.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties, including when
+ the Licensed Material is used other than for NonCommercial
+ purposes.
+
+ Section 3 -- License Conditions.
+
+ Your exercise of the Licensed Rights is expressly made subject to the
+ following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's
+ License You apply must not prevent recipients of the Adapted
+ Material from complying with this Public License.
+
+ Section 4 -- Sui Generis Database Rights.
+
+ Where the Licensed Rights include Sui Generis Database Rights that
+ apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database for NonCommercial purposes
+ only;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material; and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+ For the avoidance of doubt, this Section 4 supplements and does not
+ replace Your obligations under this Public License where the Licensed
+ Rights include other Copyright and Similar Rights.
+
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+ Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+ Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+ Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+ =======================================================================
+
+ Creative Commons is not a party to its public
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
+ its public licenses to material it publishes and in those instances
+ will be considered the “Licensor.” The text of the Creative Commons
+ public licenses is dedicated to the public domain under the CC0 Public
+ Domain Dedication. Except for the limited purpose of indicating that
+ material is shared under a Creative Commons public license or as
+ otherwise permitted by the Creative Commons policies published at
+ creativecommons.org/policies, Creative Commons does not authorize the
+ use of the trademark "Creative Commons" or any other trademark or logo
+ of Creative Commons without its prior written consent including,
+ without limitation, in connection with any unauthorized modifications
+ to any of its public licenses or any other arrangements,
+ understandings, or agreements concerning use of licensed material. For
+ the avoidance of doubt, this paragraph does not form part of the
+ public licenses.
+
+ Creative Commons may be contacted at creativecommons.org.
+
+ 5. OpenDiT (https://github.com/NUS-HPC-AI-Lab/OpenDiT/blob/master/LICENSE)
+
+ Copyright OpenDiT
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/exp_code/1_benchmark/Open-Sora_v12/README.md b/exp_code/1_benchmark/Open-Sora_v12/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab7bf35b276b00211a17bb751131ac912fd84f7d
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/README.md
@@ -0,0 +1,587 @@
+
+## Open-Sora: Democratizing Efficient Video Production for All
+
+We design and implement **Open-Sora**, an initiative dedicated to **efficiently** producing high-quality video. We hope to make the model,
+tools and all details accessible to all. By embracing **open-source** principles,
+Open-Sora not only democratizes access to advanced video generation techniques, but also offers a
+streamlined and user-friendly platform that simplifies the complexities of video generation.
+With Open-Sora, our goal is to foster innovation, creativity, and inclusivity within the field of content creation.
+
+[[中文文档](/docs/zh_CN/README.md)] [[潞晨云](https://cloud.luchentech.com/)|[OpenSora镜像](https://cloud.luchentech.com/doc/docs/image/open-sora/)|[视频教程](https://www.bilibili.com/video/BV1ow4m1e7PX/?vd_source=c6b752764cd36ff0e535a768e35d98d2)]
+
+## 📰 News
+
+- **[2024.06.17]** 🔥 We released **Open-Sora 1.2**, which includes **3D-VAE**, **rectified flow**, and **score condition**. The video quality is greatly improved. [[checkpoints]](#open-sora-10-model-weights) [[report]](/docs/report_03.md) [[blog]](https://hpc-ai.com/blog/open-sora-from-hpc-ai-tech-team-continues-open-source-generate-any-16-second-720p-hd-video-with-one-click-model-weights-ready-to-use)
+- **[2024.04.25]** 🤗 We released the [Gradio demo for Open-Sora](https://huggingface.co/spaces/hpcai-tech/open-sora) on Hugging Face Spaces.
+- **[2024.04.25]** We released **Open-Sora 1.1**, which supports **2s~15s, 144p to 720p, any aspect ratio** text-to-image, **text-to-video, image-to-video, video-to-video, infinite time** generation. In addition, a full video processing pipeline is released. [[checkpoints]]() [[report]](/docs/report_02.md)
+- **[2024.03.18]** We released **Open-Sora 1.0**, a fully open-source project for video generation.
+ Open-Sora 1.0 supports a full pipeline of video data preprocessing, training with acceleration,
+ inference, and more. Our model can produce 2s 512x512 videos with only 3 days of training. [[checkpoints]](#open-sora-10-model-weights)
+ [[blog]](https://hpc-ai.com/blog/open-sora-v1.0) [[report]](/docs/report_01.md)
+- **[2024.03.04]** Open-Sora provides training with a 46% cost reduction.
+ [[blog]](https://hpc-ai.com/blog/open-sora)
+
+## 🎥 Latest Demo
+
+🔥 You can experience Open-Sora on our [🤗 Gradio application on Hugging Face](https://huggingface.co/spaces/hpcai-tech/open-sora). More samples and corresponding prompts are available in our [Gallery](https://hpcaitech.github.io/Open-Sora/).
+
+| **4s 720×1280** | **4s 720×1280** | **4s 720×1280** |
+| --- | --- | --- |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/7895aab6-ed23-488c-8486-091480c26327) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/20f07c7b-182b-4562-bbee-f1df74c86c9a) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/3d897e0d-dc21-453a-b911-b3bda838acc2) |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/644bf938-96ce-44aa-b797-b3c0b513d64c) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/272d88ac-4b4a-484d-a665-8d07431671d0) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/ebbac621-c34e-4bb4-9543-1c34f8989764) |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/a1e3a1a3-4abd-45f5-8df2-6cced69da4ca) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/d6ce9c13-28e1-4dff-9644-cc01f5f11926) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/561978f8-f1b0-4f4d-ae7b-45bec9001b4a) |
+
+
+OpenSora 1.1 Demo
+
+| **2s 240×426** | **2s 240×426** |
+| --- | --- |
+| [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c31ebc52-de39-4a4e-9b1e-9211d45e05b2) | [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c31ebc52-de39-4a4e-9b1e-9211d45e05b2) |
+| [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/f7ce4aaa-528f-40a8-be7a-72e61eaacbbd) | [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/5d58d71e-1fda-4d90-9ad3-5f2f7b75c6a9) |
+
+| **2s 426×240** | **4s 480×854** |
+| --- | --- |
+| [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/34ecb4a0-4eef-4286-ad4c-8e3a87e5a9fd) | [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c1619333-25d7-42ba-a91c-18dbc1870b18) |
+
+| **16s 320×320** | **16s 224×448** | **2s 426×240** |
+| --- | --- | --- |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/3cab536e-9b43-4b33-8da8-a0f9cf842ff2) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/9fb0b9e0-c6f4-4935-b29e-4cac10b373c4) | [video](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/3e892ad2-9543-4049-b005-643a4c1bf3bf) |
+
+
+
+
+OpenSora 1.0 Demo
+
+| **2s 512×512** | **2s 512×512** | **2s 512×512** |
+| --- | --- | --- |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/de1963d3-b43b-4e68-a670-bb821ebb6f80) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/13f8338f-3d42-4b71-8142-d234fbd746cc) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/fa6a65a6-e32a-4d64-9a9e-eabb0ebb8c16) |
+| A serene night scene in a forested area. [...] The video is a time-lapse, capturing the transition from day to night, with the lake and forest serving as a constant backdrop. | A soaring drone footage captures the majestic beauty of a coastal cliff, [...] The water gently laps at the rock base and the greenery that clings to the top of the cliff. | The majestic beauty of a waterfall cascading down a cliff into a serene lake. [...] The camera angle provides a bird's eye view of the waterfall. |
+| [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/64232f84-1b36-4750-a6c0-3e610fa9aa94) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/983a1965-a374-41a7-a76b-c07941a6c1e9) | [video](https://github.com/hpcaitech/Open-Sora/assets/99191637/ec10c879-9767-4c31-865f-2e8d6cf11e65) |
+| A bustling city street at night, filled with the glow of car headlights and the ambient light of streetlights. [...] | The vibrant beauty of a sunflower field. The sunflowers are arranged in neat rows, creating a sense of order and symmetry. [...] | A serene underwater scene featuring a sea turtle swimming through a coral reef. The turtle, with its greenish-brown shell [...] |
+
+Videos are downsampled to `.gif` for display. Click for original videos. Prompts are trimmed for display;
+see [here](/assets/texts/t2v_samples.txt) for full prompts.
+
+
+
+## 🔆 New Features/Updates
+
+- 📍 **Open-Sora 1.2** released. Model weights are available [here](#model-weights). See our **[report 1.2](/docs/report_03.md)** for more details.
+- ✅ Support rectified flow scheduling.
+- ✅ Support more conditioning including fps, aesthetic score, motion strength and camera motion.
+- ✅ Trained our 3D-VAE for temporal dimension compression.
+- 📍 **Open-Sora 1.1** released. Model weights are available [here](#model-weights). It is trained on **0s~15s, 144p to 720p, various aspect ratios** videos. See our **[report 1.1](/docs/report_02.md)** for more discussions.
+- 🔧 **Data processing pipeline v1.1** is released. An automatic [processing pipeline](#data-processing) from raw videos to (text, video clip) pairs is provided, including scene cutting $\rightarrow$ filtering (aesthetic, optical flow, OCR, etc.) $\rightarrow$ captioning $\rightarrow$ managing. With this tool, you can easily build your video dataset.
+
+
+View more
+
+- ✅ Improved ST-DiT architecture includes rope positional encoding, qk norm, longer text length, etc.
+- ✅ Support training with any resolution, aspect ratio, and duration (including images).
+- ✅ Support image and video conditioning and video editing, and thus support animating images, connecting videos, etc.
+- 📍 **Open-Sora 1.0** released. Model weights are available [here](#model-weights). With only 400K video clips and 200 H800
+ days (compared with 152M samples in Stable Video Diffusion), we are able to generate 2s 512×512 videos. See our **[report 1.0](docs/report_01.md)** for more discussions.
+- ✅ Three-stage training from an image diffusion model to a video diffusion model. We provide the weights for each
+ stage.
+- ✅ Support training acceleration including accelerated transformer, faster T5 and VAE, and sequence parallelism.
+ Open-Sora improves training speed by **55%** when training on 64x512x512 videos. Details are located
+ at [acceleration.md](docs/acceleration.md).
+- 🔧 **Data preprocessing pipeline v1.0**,
+ including [downloading](tools/datasets/README.md), [video cutting](tools/scene_cut/README.md),
+ and [captioning](tools/caption/README.md) tools. Our data collection plan can be found
+ at [datasets.md](docs/datasets.md).
+- ✅ We find that the VQ-VAE from [VideoGPT](https://wilson1yan.github.io/videogpt/index.html) has low quality and thus adopt a
+ better VAE from [Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original). We also find that patching in
+ the time dimension deteriorates quality. See our **[report](docs/report_01.md)** for more discussions.
+- ✅ We investigate different architectures including DiT, Latte, and our proposed STDiT. Our **STDiT** achieves a better
+ trade-off between quality and speed. See our **[report](docs/report_01.md)** for more discussions.
+- ✅ Support CLIP and T5 text conditioning.
+- ✅ By viewing images as one-frame videos, our project supports training DiT on both images and videos (e.g., ImageNet &
+ UCF101). See [commands.md](docs/commands.md) for more instructions.
+- ✅ Support inference with official weights
+ from [DiT](https://github.com/facebookresearch/DiT), [Latte](https://github.com/Vchitect/Latte),
+ and [PixArt](https://pixart-alpha.github.io/).
+- ✅ Refactor the codebase. See [structure.md](docs/structure.md) to learn the project structure and how to use the
+ config files.
+
+
+
+### TODO list sorted by priority
+
+
+View more
+
+- [x] Training Video-VAE and adapt our model to new VAE.
+- [x] Scaling model parameters and dataset size.
+- [x] Incorporate a better scheduler (rectified flow).
+- [x] Evaluation pipeline.
+- [x] Complete the data processing pipeline (including dense optical flow, aesthetic scores, text-image similarity, etc.). See [the dataset](/docs/datasets.md) for more information.
+- [x] Support image and video conditioning.
+- [x] Support variable aspect ratios, resolutions, durations.
+
+
+
+## Contents
+
+- [Installation](#installation)
+- [Model Weights](#model-weights)
+- [Gradio Demo](#gradio-demo)
+- [Inference](#inference)
+- [Data Processing](#data-processing)
+- [Training](#training)
+- [Evaluation](#evaluation)
+- [VAE Training & Evaluation](#vae-training--evaluation)
+- [Contribution](#contribution)
+- [Citation](#citation)
+- [Acknowledgement](#acknowledgement)
+
+Other useful documents and links are listed below.
+
+- Report: each version is trained from an image base separately (not continuously trained), and each newer version incorporates the techniques from the previous one.
+ - [report 1.2](docs/report_03.md): rectified flow, 3d-VAE, score condition, evaluation, etc.
+ - [report 1.1](docs/report_02.md): multi-resolution/length/aspect-ratio, image/video conditioning/editing, data preprocessing, etc.
+ - [report 1.0](docs/report_01.md): architecture, captioning, etc.
+ - [acceleration.md](docs/acceleration.md)
+- Repo structure: [structure.md](docs/structure.md)
+- Config file explanation: [config.md](docs/config.md)
+- Useful commands: [commands.md](docs/commands.md)
+- Data processing pipeline and dataset: [datasets.md](docs/datasets.md)
+- Each data processing tool's README: [dataset conventions and management](/tools/datasets/README.md), [scene cutting](/tools/scene_cut/README.md), [scoring](/tools/scoring/README.md), [caption](/tools/caption/README.md)
+- Evaluation: [eval/README.md](/eval/README.md)
+- Gallery: [gallery](https://hpcaitech.github.io/Open-Sora/)
+
+## Installation
+
+### Install from Source
+
+For CUDA 12.1, you can install the dependencies with the following commands. Otherwise, please refer to the [Installation Documentation](docs/installation.md) for instructions on other CUDA versions and on the additional dependencies for data preprocessing, VAE, and model evaluation.
+
+```bash
+# create a virtual env and activate (conda as an example)
+conda create -n opensora python=3.9
+conda activate opensora
+
+# download the repo
+git clone https://github.com/hpcaitech/Open-Sora
+cd Open-Sora
+
+# install torch, torchvision and xformers
+pip install -r requirements/requirements-cu121.txt
+
+# the default installation is for inference only
+pip install -v . # for development mode, `pip install -v -e .`
+```
+
+(Optional, recommended for higher speed, especially for training) To enable `layernorm_kernel` and `flash_attn`, you need to install `apex` and `flash-attn` with the following commands.
+
+```bash
+# install flash attention
+# set enable_flash_attn=False in config to disable flash attention
+pip install packaging ninja
+pip install flash-attn --no-build-isolation
+
+# install apex
+# set enable_layernorm_kernel=False in config to disable apex
+pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git
+```
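+
+After these optional installs, one quick way to confirm that both packages are importable is the check below (an illustrative sanity check, not part of the official setup):
+
+```bash
+# both imports should succeed if the builds worked
+python -c "import flash_attn; import apex; print('flash-attn and apex are available')"
+```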
+
+### Use Docker
+
+Run the following command to build a Docker image from the provided Dockerfile.
+
+```bash
+docker build -t opensora .
+```
+
+Run the following command to start the docker container in interactive mode.
+
+```bash
+docker run -ti --gpus all -v .:/workspace/Open-Sora opensora
+```
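+
+Since the `-v` flag above mounts the current checkout at `/workspace/Open-Sora`, you can run the commands from the [Inference](#inference) section inside the container; for example, a sketch reusing the text-to-video command shown later:
+
+```bash
+# run inside the container started above
+cd /workspace/Open-Sora
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+    --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
+    --prompt "a beautiful waterfall"
+```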
+
+## Model Weights
+
+### Open-Sora 1.2 Model Weights
+
+| Model | Model Size | Data | #iterations | Batch Size | URL |
+| --------- | ---------- | ---- | ----------- | ---------- | ------------------------------------------------------------- |
+| Diffusion | 1.1B | 30M | 70k | Dynamic | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v3) |
+| VAE | 384M | 3M | 1M | 8 | [:link:](https://huggingface.co/hpcai-tech/OpenSora-VAE-v1.2) |
+
+See our **[report 1.2](docs/report_03.md)** for more information. Weights will be automatically downloaded when you run the inference script.
+
+> For users from mainland China, try `export HF_ENDPOINT=https://hf-mirror.com` to successfully download the weights.
+
+### Open-Sora 1.1 Model Weights
+
+
+View more
+
+| Resolution | Model Size | Data | #iterations | Batch Size | URL |
+| ------------------ | ---------- | -------------------------- | ----------- | ------------------------------------------------- | -------------------------------------------------------------------- |
+| mainly 144p & 240p | 700M | 10M videos + 2M images | 100k | [dynamic](/configs/opensora-v1-1/train/stage2.py) | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v2-stage2) |
+| 144p to 720p | 700M | 500K HQ videos + 1M images | 4k | [dynamic](/configs/opensora-v1-1/train/stage3.py) | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v2-stage3) |
+
+See our **[report 1.1](docs/report_02.md)** for more information.
+
+:warning: **LIMITATION**: This version contains known issues that we plan to fix in the next version (we are saving compute resources for the next release). In addition, video generation may fail for long durations, and high resolutions may produce noisy results due to this problem.
+
+
+
+### Open-Sora 1.0 Model Weights
+
+
+View more
+
+| Resolution | Model Size | Data | #iterations | Batch Size | GPU days (H800) | URL |
+| ---------- | ---------- | ------ | ----------- | ---------- | --------------- | --------------------------------------------------------------------------------------------- |
+| 16×512×512 | 700M | 20K HQ | 20k | 2×64 | 35 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-HQ-16x512x512.pth) |
+| 16×256×256 | 700M | 20K HQ | 24k | 8×64 | 45 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-HQ-16x256x256.pth) |
+| 16×256×256 | 700M | 366K | 80k | 8×64 | 117 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-16x256x256.pth) |
+
+Training order: 16x256x256 $\rightarrow$ 16x256x256 HQ $\rightarrow$ 16x512x512 HQ.
+
+Our model's weights are partially initialized from [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha). The number of
+parameters is 724M. More information about training can be found in our **[report](/docs/report_01.md)**. More about
+the dataset can be found in [datasets.md](/docs/datasets.md). HQ means high quality.
+
+:warning: **LIMITATION**: Our model is trained on a limited budget. The quality and text alignment are relatively poor.
+The model performs poorly, especially when generating human beings, and cannot follow detailed instructions. We are working
+on improving the quality and text alignment.
+
+
+
+## Gradio Demo
+
+🔥 You can experience Open-Sora on our [🤗 Gradio application](https://huggingface.co/spaces/hpcai-tech/open-sora) on Hugging Face online.
+
+### Local Deployment
+
+If you want to deploy Gradio locally, we have also provided a [Gradio application](./gradio) in this repository. You can use the following command to start an interactive web application and experience video generation with Open-Sora.
+
+```bash
+pip install gradio spaces
+python gradio/app.py
+```
+
+This will launch a Gradio application on your localhost. If you want to know more about the Gradio application, you can refer to the [Gradio README](./gradio/README.md).
+
+To enable prompt enhancement and input in other languages (e.g., Chinese), you need to set the `OPENAI_API_KEY` in the environment. Check [OpenAI's documentation](https://platform.openai.com/docs/quickstart) to get your API key.
+
+```bash
+export OPENAI_API_KEY=YOUR_API_KEY
+```
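+
+If you prefer not to export the key globally, you can also set it just for the Gradio process (an equivalent one-liner):
+
+```bash
+OPENAI_API_KEY=YOUR_API_KEY python gradio/app.py
+```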
+
+### Getting Started
+
+In the Gradio application, the basic options are as follows:
+
+
+
+The easiest way to generate a video is to input a text prompt and click the "**Generate video**" button (scroll down if you cannot find it). The generated video will be displayed in the right panel. Checking "**Enhance prompt with GPT4o**" will use GPT-4o to refine the prompt, while the "**Random Prompt**" button will generate a random prompt with GPT-4o for you. Due to OpenAI's API limits, the prompt refinement result has some randomness.
+
+Then, you can choose the **resolution**, **duration**, and **aspect ratio** of the generated video. Different resolutions and video lengths affect the generation speed. On an 80G H100 GPU, the generation speed (with `num_sampling_step=30`) and peak memory usage are:
+
+| | Image | 2s | 4s | 8s | 16s |
+| ---- | ------- | -------- | --------- | --------- | --------- |
+| 360p | 3s, 24G | 18s, 27G | 31s, 27G | 62s, 28G | 121s, 33G |
+| 480p | 2s, 24G | 29s, 31G | 55s, 30G | 108s, 32G | 219s, 36G |
+| 720p | 6s, 27G | 68s, 41G | 130s, 39G | 260s, 45G | 547s, 67G |
+
+Note that besides text-to-video, you can also use **image-to-video generation**. You can upload an image and then click the "**Generate video**" button to generate a video with the image as the first frame. Alternatively, you can fill in the text prompt and click the "**Generate image**" button to generate an image from the text prompt, and then click the "**Generate video**" button to generate a video from the image produced by the same model.
+
+
+
+Then you can specify more options, including "**Motion Strength**", "**Aesthetic**", and "**Camera Motion**". If "Enable" is not checked or the choice is "none", the information is not passed to the model. Otherwise, the model will generate videos with the specified motion strength, aesthetic score, and camera motion.
+
+For the **aesthetic score**, we recommend using values higher than 6. For **motion strength**, a smaller value leads to a smoother but less dynamic video, while a larger value leads to a more dynamic but likely blurrier video. You can first generate without it and then adjust it according to the result. For **camera motion**, the model sometimes cannot follow the instruction well, and we are working on improving it.
+
+You can also adjust the "**Sampling steps**", which is directly related to the generation speed since it is the number of denoising steps. A number smaller than 30 usually leads to poor generation results, while a number larger than 100 usually brings no significant improvement. The "**Seed**" is used for reproducibility: you can set it to a fixed number to generate the same video. The "**CFG Scale**" controls how much the model follows the text prompt: a smaller value leads to a more random video, while a larger value leads to a video that follows the text more closely (7 is recommended).
+
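+The aesthetic score, motion strength, and sampling steps discussed above appear to map to the `--aes`, `--flow`, and `--num-sampling-steps` options of the command-line inference script (see the [Inference](#inference) section below); a sketch with illustrative values:
+
+```bash
+# illustrative values: aesthetic score 7, moderate motion strength, 30 sampling steps
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+    --num-frames 4s --resolution 480p \
+    --num-sampling-steps 30 --flow 4 --aes 7 \
+    --prompt "a beautiful waterfall"
+```
+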
+For more advanced usage, you can refer to [Gradio README](./gradio/README.md#advanced-usage).
+
+## Inference
+
+### Open-Sora 1.2 Command Line Inference
+
+The basic command line inference is as follows:
+
+```bash
+# text to video
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
+ --prompt "a beautiful waterfall"
+```
+
+You can add more options to the command line to customize the generation.
+
+```bash
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
+ --num-sampling-steps 30 --flow 5 --aes 6.5 \
+ --prompt "a beautiful waterfall"
+```
+
+For image to video generation and other functionalities, the API is compatible with Open-Sora 1.1. See [here](docs/commands.md) for more instructions.
+
+If your installation does not contain `apex` and `flash-attn`, you need to disable them in the config file or via the following command.
+
+```bash
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+ --num-frames 4s --resolution 720p \
+ --layernorm-kernel False --flash-attn False \
+ --prompt "a beautiful waterfall"
+```
+
+### Sequence Parallelism Inference
+
+To enable sequence parallelism, you need to use `torchrun` to run the inference script. The following command will run the inference with 2 GPUs.
+
+```bash
+# text to video
+CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc_per_node 2 scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
+ --prompt "a beautiful waterfall"
+```
+
+:warning: **LIMITATION**: Sequence parallelism is not supported for Gradio deployment. For now, sequence parallelism is only supported when the dimension can be divided evenly by the number of GPUs, so it may fail in some cases. We tested 4 GPUs for 720p and 2 GPUs for 480p.
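+
+For the tested 4-GPU, 720p setting, the command scales accordingly (a sketch based on the 2-GPU example above; it assumes 4 GPUs are visible):
+
+```bash
+# text to video with sequence parallelism across 4 GPUs (tested for 720p)
+CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --nproc_per_node 4 scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+    --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
+    --prompt "a beautiful waterfall"
+```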
+
+### GPT-4o Prompt Refinement
+
+We find that GPT-4o can refine the prompt and improve the quality of the generated video. With this feature, you can also use other languages (e.g., Chinese) as the prompt. To enable this feature, you need to prepare your OpenAI API key in the environment:
+
+```bash
+export OPENAI_API_KEY=YOUR_API_KEY
+```
+
+Then you can run inference with `--llm-refine True` to enable GPT-4o prompt refinement, or leave the prompt empty to get a random prompt generated by GPT-4o.
+
+```bash
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+ --num-frames 4s --resolution 720p --llm-refine True
+```
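+
+Since the refinement also accepts other languages, you can, for example, pass a Chinese prompt directly (an illustrative sketch; the prompt text is arbitrary and means "a beautiful waterfall"):
+
+```bash
+python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+    --num-frames 4s --resolution 720p --llm-refine True \
+    --prompt "一个美丽的瀑布"
+```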
+
+### Open-Sora 1.1 Command Line Inference
+
+
+View more
+
+Since Open-Sora 1.1 supports inference with dynamic input size, you can pass the input size as an argument.
+
+```bash
+# text to video
+python scripts/inference.py configs/opensora-v1-1/inference/sample.py --prompt "A beautiful sunset over the city" --num-frames 32 --image-size 480 854
+```
+
+If your installation does not contain `apex` and `flash-attn`, you need to disable them in the config file or via the following command.
+
+```bash
+python scripts/inference.py configs/opensora-v1-1/inference/sample.py --prompt "A beautiful sunset over the city" --num-frames 32 --image-size 480 854 --layernorm-kernel False --flash-attn False
+```
+
+See [here](docs/commands.md#inference-with-open-sora-11) for more instructions including text-to-image, image-to-video, video-to-video, and infinite time generation.
+
+
+
+### Open-Sora 1.0 Command Line Inference
+
+
+View more
+
+We have also provided an offline inference script. Run the following commands to generate samples; the required model weights will be automatically downloaded. To change sampling prompts, modify the txt file passed to `--prompt-path`. See [here](docs/structure.md#inference-config-demos) to customize the configuration.
+
+```bash
+# Sample 16x512x512 (20s/sample, 100 time steps, 24 GB memory)
+torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path OpenSora-v1-HQ-16x512x512.pth --prompt-path ./assets/texts/t2v_samples.txt
+
+# Sample 16x256x256 (5s/sample, 100 time steps, 22 GB memory)
+torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path OpenSora-v1-HQ-16x256x256.pth --prompt-path ./assets/texts/t2v_samples.txt
+
+# Sample 64x512x512 (40s/sample, 100 time steps)
+torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
+
+# Sample 64x512x512 with sequence parallelism (30s/sample, 100 time steps)
+# sequence parallelism is enabled automatically when nproc_per_node is larger than 1
+torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
+```
+
+The speed is tested on H800 GPUs. For inference with other models, see [here](docs/commands.md) for more instructions.
+To lower the memory usage, set a smaller `vae.micro_batch_size` in the config (at the cost of slightly lower sampling speed).
+
+
+
+## Data Processing
+
+High-quality data is crucial for training good generation models.
+To this end, we establish a complete pipeline for data processing, which can seamlessly convert raw videos into high-quality video-text pairs.
+For detailed information about the pipeline, please refer to [data processing](docs/data_processing.md).
+Also check out the [datasets](docs/datasets.md) we use.
+
+
+
+## Training
+
+### Open-Sora 1.2 Training
+
+The training process is the same as for Open-Sora 1.1.
+
+```bash
+# one node
+torchrun --standalone --nproc_per_node 8 scripts/train.py \
+ configs/opensora-v1-2/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+# multiple nodes
+colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py \
+ configs/opensora-v1-2/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+```
+
+### Open-Sora 1.1 Training
+
+
+View more
+
+Once you prepare the data in a `csv` file, run the following commands to launch training on a single node.
+
+```bash
+# one node
+torchrun --standalone --nproc_per_node 8 scripts/train.py \
+ configs/opensora-v1-1/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+# multiple nodes
+colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py \
+ configs/opensora-v1-1/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+```
+
+
+
+### Open-Sora 1.0 Training
+
+
+View more
+
+Once you prepare the data in a `csv` file, run the following commands to launch training on a single node.
+
+```bash
+# 1 GPU, 16x256x256
+torchrun --nnodes=1 --nproc_per_node=1 scripts/train.py configs/opensora/train/16x256x256.py --data-path YOUR_CSV_PATH
+# 8 GPUs, 64x512x512
+torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+```
+
+To launch training on multiple nodes, prepare a hostfile according
+to [ColossalAI](https://colossalai.org/docs/basics/launch_colossalai/#launch-with-colossal-ai-cli), and run the
+following commands.
+
+```bash
+colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
+```
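+
+A hostfile is a plain text file listing one reachable node per line (the hostnames below are placeholders; see the ColossalAI documentation linked above for details):
+
+```bash
+# an illustrative hostfile for two 8-GPU nodes
+cat > hostfile <<EOF
+node1
+node2
+EOF
+```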
+
+For training other models and advanced usage, see [here](docs/commands.md) for more instructions.
+
+
+
+## Evaluation
+
+We support evaluation based on:
+
+- Validation loss
+- [VBench](https://github.com/Vchitect/VBench/tree/master) score
+- VBench-i2v score
+- Batch generation for human evaluation
+
+All the evaluation code is released in the `eval` folder. Check the [README](/eval/README.md) for more details. Our [report](/docs/report_03.md#evaluation) also provides more information about the evaluation during training. The following table shows that Open-Sora 1.2 greatly improves over Open-Sora 1.0.
+
+| Model | Total Score | Quality Score | Semantic Score |
+| -------------- | ----------- | ------------- | -------------- |
+| Open-Sora V1.0 | 75.91% | 78.81% | 64.28% |
+| Open-Sora V1.2 | 79.23% | 80.71% | 73.30% |
+
+## VAE Training & Evaluation
+
+We train a VAE pipeline that consists of a spatial VAE followed by a temporal VAE.
+For more details, refer to [VAE Documentation](docs/vae.md).
+Before you run the following commands, follow our [Installation Documentation](docs/installation.md) to install the required dependencies for VAE and Evaluation.
+
+If you want to train your own VAE, you need to prepare data in a csv file following the [data processing](#data-processing) pipeline, then run the following commands.
+Note that you need to adjust the number of training epochs (`epochs`) in the config file according to the size of your own csv data.
+
+```bash
+# stage 1 training, 380k steps, 8 GPUs
+torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage1.py --data-path YOUR_CSV_PATH
+# stage 2 training, 260k steps, 8 GPUs
+torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage2.py --data-path YOUR_CSV_PATH
+# stage 3 training, 540k steps, 24 GPUs
+torchrun --nnodes=3 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage3.py --data-path YOUR_CSV_PATH
+```
+
+To evaluate the VAE performance, you need to run VAE inference first to generate the videos, then calculate scores on the generated videos:
+
+```bash
+# video generation
+torchrun --standalone --nnodes=1 --nproc_per_node=1 scripts/inference_vae.py configs/vae/inference/video.py --ckpt-path YOUR_VAE_CKPT_PATH --data-path YOUR_CSV_PATH --save-dir YOUR_VIDEO_DIR
+# the original videos will be saved to `YOUR_VIDEO_DIR_ori`
+# the reconstructed videos through the pipeline will be saved to `YOUR_VIDEO_DIR_rec`
+# the reconstructed videos through the spatial VAE only will be saved to `YOUR_VIDEO_DIR_spatial`
+
+# score calculation
+python eval/vae/eval_common_metric.py --batch_size 2 --real_video_dir YOUR_VIDEO_DIR_ori --generated_video_dir YOUR_VIDEO_DIR_rec --device cuda --sample_fps 24 --crop_size 256 --resolution 256 --num_frames 17 --sample_rate 1 --metric ssim psnr lpips flolpips
+```
+
+## Contribution
+
+Thanks goes to these wonderful contributors:
+
+
+
+
+
+If you wish to contribute to this project, please refer to the [Contribution Guideline](./CONTRIBUTING.md).
+
+## Acknowledgement
+
+Here we only list a few of the projects. For other works and datasets, please refer to our report.
+
+- [ColossalAI](https://github.com/hpcaitech/ColossalAI): A powerful large model parallel acceleration and optimization
+ system.
+- [DiT](https://github.com/facebookresearch/DiT): Scalable Diffusion Models with Transformers.
+- [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT): An acceleration framework for DiT training. We adopt valuable acceleration
+ strategies for training from OpenDiT.
+- [PixArt](https://github.com/PixArt-alpha/PixArt-alpha): An open-source DiT-based text-to-image model.
+- [Latte](https://github.com/Vchitect/Latte): An attempt to efficiently train DiT for video.
+- [StabilityAI VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse-original): A powerful image VAE model.
+- [CLIP](https://github.com/openai/CLIP): A powerful text-image embedding model.
+- [T5](https://github.com/google-research/text-to-text-transfer-transformer): A powerful text encoder.
+- [LLaVA](https://github.com/haotian-liu/LLaVA): A powerful image captioning model based on [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) and [Yi-34B](https://huggingface.co/01-ai/Yi-34B).
+- [PLLaVA](https://github.com/magic-research/PLLaVA): A powerful video captioning model.
+- [MiraData](https://github.com/mira-space/MiraData): A large-scale video dataset with long durations and structured captions.
+
+We are grateful for their exceptional work and generous contribution to open source. Special thanks go to the authors of [MiraData](https://github.com/mira-space/MiraData) and [Rectified Flow](https://github.com/gnobitab/RectifiedFlow) for their valuable advice and help. We wish to express gratitude towards AK for sharing this project on social media and Hugging Face for providing free GPU resources for our online Gradio demo.
+
+## Citation
+
+```bibtex
+@software{opensora,
+ author = {Zangwei Zheng and Xiangyu Peng and Tianji Yang and Chenhui Shen and Shenggui Li and Hongxin Liu and Yukun Zhou and Tianyi Li and Yang You},
+ title = {Open-Sora: Democratizing Efficient Video Production for All},
+ month = {March},
+ year = {2024},
+ url = {https://github.com/hpcaitech/Open-Sora}
+}
+```
+
+## Star History
+
+[](https://star-history.com/#hpcaitech/Open-Sora&Date)
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_category.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_category.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fd797edb2107daadc90636e3830eb9387380ff6f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_category.txt
@@ -0,0 +1,800 @@
+a black dog wearing halloween costume
+spider making a web
+bat eating fruits while hanging
+a snake crawling on a wooden flooring
+a close up video of a dragonfly
+macro shot of ladybug on green leaf plant
+chameleon eating ant
+a bee feeding on nectars
+bird nests on a tree captured with moving camera
+a squirrel eating nuts
+close up video of snail
+top view of a hermit crab crawling on a wooden surface
+cat licking another cat
+red dragonfly perched on green leaf
+close up view of a brown caterpillar crawling on green leaf
+ants eating dead spider
+an eagle on a tree branch
+a frog eating an ant
+white rabbit near the fence
+a gorilla eating a carrot
+close up of wolf
+a meerkat looking around
+a hyena in a zoo
+lemur eating grass leaves
+an owl being trained by a man
+a lizard on a bamboo
+brown chicken hunting for its food
+video of parrots perched on bird stand
+underwater footage of an octopus in a coral reef
+a cute pomeranian dog playing with a soccer ball
+white fox on rock
+close up footage of a horse figurine
+giraffe feeding on a tree in a savannah
+curious cat sitting and looking around
+hummingbird hawk moth flying near pink flowers
+close up of a scorpion on a rock
+close up on fish in net
+koala eating leaves from a branch
+a pod of dolphins swirling in the sea catching forage fish
+low angle view of a hawk perched on a tree branch
+a lion standing on wild grass
+deer grazing in the field
+elephant herd in a savanna
+close up on lobster under water
+hedgehog crossing road in forest
+a sheep eating yellow flowers from behind a wire fence
+twin sisters and a turtle
+a pig wallowing in mud
+flock of goose eating on the lake water
+cow in a field irritated with flies
+a close up shot of a fly
+cheetah lying on the grass
+close up of a lemur
+close up shot of a kangaroo itching in the sand
+a tortoise covered with algae
+turkey in cage
+a great blue heron bird in the lakeside
+crab with shell in aquarium
+a seagull walking on shore
+an american crocodile
+a tiger walking inside a cage
+alligator in the nature
+a raccoon climbing a tree
+wild rabbit in a green meadow
+group of ring tailed lemurs
+a clouded leopard on a tree branch
+duck grooming its feathers
+an african penguin walking on a beach
+a video of a peacock
+close up shot of a wild bear
+baby rhino plays with mom
+porcupine climbs tree branches
+close up of a natterjack toad on a rock
+a sleeping orangutan
+mother whale swimming with babies
+a bear wearing red jersey
+pink jellyfish swimming underwater in a blue sea
+beautiful clown fish swimming
+animation of disposable objects shaped as a whale
+paper cut out of a pair of hands a whale and a heart
+vertical video of camel roaming in the field during daytime
+a still video of mosquito biting human
+a curious sloth hanging from a tree branch
+a plastic flamingo bird stumbles from the wind
+a wolf in its natural habitat
+a monkey sitting in the stone and scratching his head
+bat hanging upside down
+a red panda eating leaves
+snake on ground
+a harbour seal swimming near the shore
+shark swimming in the sea
+otter on branch while eating
+goat standing over a rock
+a troop of monkey on top of a mountain
+a zebra eating grass on the field
+a colorful butterfly perching on a bud
+a snail crawling on a leaf
+zookeeper showering a baby elephant
+a beetle emerging from the sand
+a nine banded armadillo searching for food
+an apartment building with balcony
+asian garden and medieval castle
+illuminated tower in berlin
+a wooden house overseeing the lake
+a crowd of people in a plaza in front of a government building
+a church interior
+jewish friends posing with hanukkah menorah in a cabin house
+a destroyed building after a missile attack in ukraine
+abandoned building in the woods
+drone video of an abandoned school building in pripyat ukraine
+elegant university building
+architecture and designs of buildings in central london
+a pancake tower with chocolate syrup and strawberries on top
+an ancient white building
+friends hanging out at a coffee house
+house front door with christmas decorations
+city night dark building
+a bird house hanging on a tree branch
+sacred sculpture in a temple
+high angle shot of a clock tower
+modern wooden house interior
+the interior of an abandoned building
+opera house overlooking sea
+a concrete structure near the green trees
+dome like building in scotland
+low angle shot of a building
+tower on hill
+a miniature house
+eiffel tower from the seine river
+low angle footage of an apartment building
+island with pier and antique building
+asian historic architecture
+drone footage of a beautiful mansion
+mosque in the middle east
+building a tent and hammock in the forest camping site
+top view of a high rise building
+house covered in snow
+skyscraper at night
+house in village
+a casino with people outside the building
+silhouette of a building
+a woman climbing a tree house
+drone view of house near lake during golden hour
+an under construction concrete house
+a watch tower by the sea
+exterior view of arabic style building
+video of a hotel building
+red paper lantern decorations hanging outside a building
+house on seashore
+aerial footage of the palace of culture and science building in warsaw poland
+aerial video of stuttgart tv tower in germany
+aerial view of the highway and building in a city
+drone shot of a skyscraper san francisco california usa
+waterfall and house
+view of the sky through a building
+drone footage of a house on top of the mountain
+abandoned house in the nature
+clouds hovering over a mansion
+light house on the ocean
+buddhist temple at sunrise
+people walking by a graveyard near a mosque at sunset
+view of lifeguard tower on the beach
+scenic view of a house in the mountains
+the landscape in front of a government building
+aerial footage of a building and its surrounding landscape in winter
+time lapse of a cloudy sky behind a transmission tower
+blue ocean near the brown castle
+fog over temple
+house in countryside top view
+building under construction
+turkish flag waving on old tower
+the georgian building
+close up shot of a steel structure
+the atrium and interior design of a multi floor building
+city view reflected on a glass building
+aerial view of a luxurious house with pool
+an unpaved road leading to the house
+drone footage of a lookout tower in mountain landscape
+wind turbines on hill behind building
+time lapse footage of the sun light in front of a small house porch
+a building built with lots of stairways
+overcast over house on seashore
+the view of the sydney opera house from the other side of the harbor
+candle on a jar and a house figurine on a surface
+video of a farm and house
+a dilapidated building made of bricks
+a view of a unique building from a moving vehicle
+aerial footage of a tall building in cambodia
+push in shot of a huge house
+a beach house built over a seawall protected from the sea waves
+exotic house surrounded by trees
+drone video of a house surrounded by tropical vegetation
+drone footage of a building beside a pond
+observation tower on hill in forest
+a tree house in the woods
+a video of vessel structure during daytime
+fire in front of illuminated building at night
+a footage of a wooden house on a wheat field
+tilt shot of a solar panel below a light tower
+water tower on the desert
+freshly baked finger looking cookies
+video of fake blood in wine glass
+halloween food art
+a person slicing a vegetable
+a serving of pumpkin dish in a plate
+close up view of green leafy vegetable
+a birthday cake in the plate
+video of a slice papaya fruit
+a muffin with a burning candle and a love sign by a ceramic mug
+a jack o lantern designed cookie
+baked bread with chocolate
+a broccoli soup on wooden table
+a freshly brewed coffee on a pink mug
+grabbing sourdough neapolitan style pizza slices
+person cooking mushrooms in frying pan
+rice grains placed on a reusable cloth bag
+slices of kiwi fruit
+grilling a steak on a pan grill
+close up of bread popping out of a toaster
+man eating noodle
+preparing a cocktail drink
+close up pasta with bacon on plate
+milk and cinnamon rolls
+boy getting a dumpling using chopsticks
+a mother preparing food with her kids
+man using his phone while eating
+fresh salmon salad on a plate
+cutting cucumbers into long thin slices as ingredient for sushi roll
+a steaming cup of tea by the window
+a glass filled with beer
+a kid eating popcorn while watching tv
+close up shot of fried fish on the plate
+a man eating a donut
+person making a vegetarian dish
+spreading cheese on bagel
+close up view of a man drinking red wine
+a couple having breakfast in a restaurant
+a student eating her sandwich
+girl peeling a banana
+red rice in a small bowl
+pancake with blueberry on the top
+green apple fruit on white wooden table
+a man eating a taco by the bar
+making of a burrito
+squeezing lemon into salad
+a chef cutting sushi rolls
+video of a delicious dessert
+deep frying a crab on a wok in high fire
+close up video of a orange juice
+video of a cooked chicken breast
+woman holding a pineapple
+a woman eating a bar of chocolate
+decorating christmas cookie
+squeezing a slice of fruit
+tuna sashimi on a plate
+a strawberry fruit mixed in an alcoholic drink
+preparing hot dogs in a grill
+a woman cutting a tomato
+an orange fruit cut in half
+a coconut fruit with drinking straw
+woman holding a dragon fruit
+a woman pouring hot beverage on a cup
+waffles with whipped cream and fruit
+focus shot of an insect at the bottom of a fruit
+preparing a healthy broccoli dish
+man eating snack at picnic
+close up video of a grilled shrimp skewer
+a woman mixing a smoothie drinks
+close up video of woman having a bite of jelly
+businessman drinking whiskey at the bar counter of a hotel lounge
+cutting an onion with a knife over a wooden chopping board
+fresh lemonade in bottles
+grilling a meat on a charcoal grill
+people enjoying asian cuisine
+close up footage of a hot dish on a clay pot
+pork ribs dish
+waffle with strawberry and syrup for breakfast
+tofu dish with rose garnish
+uncooked pork meat
+egg yolk being dumped over gourmet dish
+tasty brunch dish close up
+little boy pretending to eat the watermelon
+slicing roasted beef
+close up of a chef adding teriyaki sauce to a dish
+flat lay mexican dish
+a person placing an octopus dish on a marble surface
+close up of tea leaves brewing in a glass kettle
+adding fresh herbs to soup dish
+a scoop of roasted coffee beans
+fresh dim sum set up on a bamboo steam tray for cooking
+a girl putting ketchup on food at the kitchen
+cooking on electric stove
+a woman with a slice of a pie
+grapes and wine on a wooden board
+man taking picture of his food
+hamburger and fries on restaurant table
+close up video of japanese food
+a cracker sandwich with cheese filling for snack
+barista preparing matcha tea
+close up of onion rings being deep fried
+people carving a pumpkin
+people sitting on a sofa
+a man with a muertos face painting
+man walking in the dark
+men in front of their computer editing photos
+men loading christmas tree on tow truck
+woman washing the dishes
+woman adding honey to the cinnamon rolls
+two women kissing and smiling
+three women looking at watercolor paintings
+a family wearing paper bag masks
+a family posing for the camera
+a boy covering a rose flower with a dome glass
+boy sitting on grass petting a dog
+a girl in her tennis sportswear
+a girl coloring the cardboard
+silhouette of the couple during sunset
+couple dancing with body paint
+a child playing with water
+a woman with her child sitting on a couch in the living room
+a group of friend place doing hand gestures of agreement
+friends having a group selfie
+friends talking while on the basketball court
+group of people protesting
+a group of campers with a cute dog
+a group of photographers taking pictures at the north western gardens in llandudno north wales
+a group of students laughing and talking
+a group of martial artist warming up
+a person playing golf
+a person walking on a wet wooden bridge
+person doing a leg exercise
+ice hockey athlete on rink
+a young athlete training in swimming
+chess player dusting a chessboard
+baseball player holding his bat
+a bearded man putting a vinyl record on a vinyl player
+an orchestra finishes a performance
+people applauding the performance of the kids
+band performance at the recording studio
+father and his children playing jenga game
+people playing a board game
+man playing a video game
+a man video recording the movie in theater
+man and a woman eating while watching a movie
+movie crew talking together
+a director explaining the movie scene
+man and woman listening to music on car
+man playing music
+couple dancing slow dance with sun glare
+a ballerina practicing in the dance studio
+father and son holding hands
+father and daughter talking together
+a mother and her kids engaged in a video call
+mother and daughter reading a book together
+a mother teaching her daughter playing a violin
+kid in a halloween costume
+a happy kid playing the ukulele
+a chef slicing a cucumber
+chef wearing his gloves properly
+brother and sister using hammock
+girl applying sunblock to her brother
+a girl pushing the chair while her sister is on the chair
+colleagues talking in office building
+fighter practice kicking
+a woman fighter in her cosplay costume
+an engineer holding blueprints while talking with her colleague
+a young woman looking at vr controllers with her friend
+workmates teasing a colleague in the work
+a male police officer talking on the radio
+teacher holding a marker while talking
+teacher writing on her notebook
+a young student attending her online classes
+a student showing his classmates his wand
+a male vendor selling fruits
+a shirtless male climber
+a sound engineer listening to music
+female talking to a psychiatrist in a therapy session
+young female activist posing with flag
+a man in a hoodie and woman with a red bandana talking to each other and smiling
+a medium close up of women wearing kimonos
+a male interviewer listening to a person talking
+a social worker having a conversation with the foster parents
+a farm worker harvesting onions
+worker packing street food
+worker and client at barber shop
+elderly man lifting kettlebell
+mom assisting son in riding a bicycle
+dad watching her daughter eat
+young guy with vr headset
+pregnant woman exercising with trainer
+a fortune teller talking to a client
+wizard doing a ritual on a woman
+a footage of an actor on a movie scene
+a man holding a best actor trophy
+a singer of a music band
+a young singer performing on stage
+young dancer practicing at home
+seller showing room to a couple
+cab driver talking to passenger
+a policeman talking to the car driver
+kids celebrating halloween at home
+little boy helping mother in kitchen
+video of a indoor green plant
+a girl arranges a christmas garland hanging by the kitchen cabinet
+candle burning in dark room
+couple having fun and goofing around the bedroom
+girls jumping up and down in the bedroom
+woman and man in pajamas working from home
+a muslim family sitting and talking in the living room
+family enjoying snack time while sitting in the living room
+woman holding an animal puppet and a little girl playing together at the living room
+kids playing in the indoor tent
+young people celebrating new year at the office
+a woman writing on the sticky note in the office
+a woman exercising at home over a yoga mat
+girls preparing easter decorations at home
+dog on floor in room
+turning on a fluorescent light inside a room
+colleagues talking to each other near the office windows
+a woman recording herself while exercising at home
+music room
+different kind of tools kept in a utility room
+sofa beds and other furniture
+a girl finding her brother reading a book in the bedroom
+an elegant ceramic plant pot and hanging plant on indoor
+furniture inside a bedroom
+interior design of the bar section
+living room with party decoration
+firewood burning in dark room
+a young woman playing the ukulele at home
+woman painting at home
+a woman in a locker room
+video of a bathroom interior
+the interior design of a jewish synagogue
+a woman in protective suit disinfecting the kitchen
+modern minimalist home interior
+modern interior design of a coffee shop
+person arranging minimalist furniture
+aerial shot of interior of the warehouse
+a room of a manufacturing facility
+interior of catholic
+interior design of a restaurant
+a female model in a changing room looking herself in mirror
+men walking in the office hallway
+people sitting in a conference room
+the interior design of a shopping mall
+chandeliers in room
+lucerne railway station interior
+a female fencer posing in a foggy room
+a toolbox and a paint roller beside a huge package in a room
+bedroom in hotel
+a woman lying in the operating room
+a chef holding and checking kitchen utensils
+a couple singing in the shower room together
+a woman cleaning mess in the living room
+an empty meeting room with natural light
+person dancing in a dark room
+close up on blood in hospital room
+a couple resting on their home floor
+a young female staff at courier office
+a man entering the gym locker room
+a bored man sitting by the tv at home
+woman dancing in indoor garden
+rubble in the interior of an abandoned house
+indoor farm in a greenhouse
+man doing handstand in indoor garden
+an abandoned indoor swimming pool
+home decorations on top of a cabinet
+graffiti art on the interior walls of an abandoned mansion
+indoor wall climbing activity
+sunlight inside a room
+teenage girl roller skating at indoor rink
+home deco with lighted
+baby in the shower room
+men enjoying office christmas party
+a bedroom with a brick wall
+actors prepping in the dressing room
+kids playing at an indoor playground
+a person sanitizing an office space using smoke machine
+mother and daughter choosing clothes at home
+a woman sitting by the indoor fire pit
+man standing on the corner of the room while looking around
+person assembling furniture
+a family stacking cardboard boxes in a room
+family having fun in the dining room
+person disinfecting a room
+a woman washing strawberries in the kitchen sink
+modern office waiting room
+close up view of a person slicing with a kitchen knife
+boiling coffee on a stove in the kitchen
+modern equipment used in a home studio
+interior of a recording studio
+people working in a call center office
+band performing at a home concert
+a group of people watching a concert in a room
+people packing their furniture
+young employees in office holding a certificate
+a criminal inside a dark room handcuffed in a table
+couple browsing and looking for furniture in the store
+workspace at home
+video of a indoor green plant
+close up view of a plant
+close up shot of a burning plant
+plucking leaves from plant
+a plant on gold pot with glass lid
+a branch of a tree and a plant
+a leafless tree
+close up shot of fern leaf
+close up video of strawberry plant
+plant with blooming flowers
+close up video of flower petals
+watering yellow plant
+beautiful flower decoration
+cannabis flower in a jar
+a footage of the tree leaves
+a red leaf plant
+close up view of a white christmas tree
+snow pouring on a tree
+close up shot of white flowers on the tree
+leaves in the trees daytime
+a dead tree lying on a grass field
+tree branches in a flowing river
+purple flowers with leaves
+a coconut tree by the house
+close up on flower in winter
+bamboo leaves backlit by the sun
+close up video of a wet flower
+a man putting a flower in a box
+dropping flower petals on a wooden bowl
+a close up shot of gypsophila flower
+variety of succulent plants on a garden
+variety of trees and plants in a botanical garden
+forest of deciduous trees
+a stack of dried leaves burning in a forest
+tall forest trees on a misty morning
+close up view of dewdrops on a leaf
+close up view of white petaled flower
+removing a pineapple leaf
+a dragonfly perched on a leaf
+butterfly pollinating flower
+person visiting and checking a corn plant
+woman picking beans from a plant
+woman plucking mint leaves
+single tree in the middle of farmland
+a plant on a soil
+drone footage of a tree on farm field
+a tractor harvesting lavender flower
+people putting christmas ornaments on a christmas tree
+jack o lantern hanging on a tree
+tree with halloween decoration
+flower field near the waterfall
+truck carrying the tree logs
+raindrops falling on leaves
+shot of a palm tree swaying with the wind
+squirrels on a tree branch
+person holding a flower
+a fallen tree trunk
+tree with golden leaves
+cherry tree
+wind blows through leaves of the tree in autumn
+a leaf on a glass
+the long trunks of tall trees in the forest
+trees in the forest during sunny day
+close up video of tree bark
+reflection of tree branches
+trunks of many trees in the forest
+tree leaves providing shades from the sun
+leaves swaying in the wind
+low angle shot of baobab tree
+bare trees in forest
+a plant surrounded by fallen leaves
+a couple preparing food and pruning a plant
+a man cutting a tree bark
+oranges on a tree branch
+plant connected on the stones
+video of a sawmill machine cutting tree log
+women drying flower petals
+macro view of an agave plant
+a video of a person tying a plant on a string
+green moss in forest nature
+coconut tree near sea under blue sky
+the canopy of a coconut tree
+a man leaning on a tree at the beach
+a full grown plant on a pot
+candle wax dripping on flower petals
+close up of leaves in autumn
+a woman opening a book with a flower inside
+a man holding leaves looking at the camera
+a shadow of a swaying plant
+a tree and concrete structure under a blue and cloudy sky
+trimming excess leaves on a potted plant
+the changing color of the tree leaves during autumn season
+a gooseberry tree swayed by the wind
+forest trees and a medieval castle at sunset
+woman cut down tree
+an old oak tree in a park across the street from a hotel
+wild flowers growing in a forest ground
+a mossy fountain and green plants in a botanical garden
+mansion with beautiful garden
+ants on a dragon fruit flower
+scenery of desert landscape
+landscape agriculture farm tractor
+burning slash piles in the forest
+graveyard at sunset
+view of a jack o lantern with pumpkins in a smoky garden
+sun view through a spider web
+view of the sea from an abandoned building
+close up view of a full moon
+close up view of lighted candles
+close up view of swaying white flowers and leaves
+scenery of a relaxing beach
+selective focus video of grass during sunny day
+aerial view of brown dry landscape
+fireworks display in the sky at night
+a bonfire near river
+mountain view
+waterfalls in between mountain
+a picturesque view of nature
+exotic view of a riverfront city
+tall trees in the forest under the clear sky
+snow on branches in forest
+stream in the nature
+an airplane flying above the sea of clouds
+scenic video of sunset
+view of houses with bush fence under a blue and cloudy sky
+scenic view from wooden pathway
+scenic view of a tropical beach
+drone footage of waves crashing on beach shore
+a scenic view of the golden hour at norway
+time lapse video of foggy mountain forest
+brown mountain during fall season
+video of ocean during daytime
+boat sailing in the ocean
+top view of yachts
+beautiful scenery of flowing waterfalls and river
+wild ducks paddling on the lake surface
+a relaxing scenery of beach view under cloudy sky
+natural rock formations on beach under cloudy sky
+a palm tree against blue sky
+video of sailboat on a lake during sunset
+aerial view of snow piles
+time lapse of a sunset sky in the countryside
+aerial footage of a statue
+time lapse video of a farm during sunset
+clouds formation in the sky at sunset
+aerial shot of a village
+drone shot of a beautiful sunrise at the mountains
+time lapse video of foggy morning during sunrise
+sun shining between tree leaves at sunrise
+video of lake during dawn
+vehicles traveling on roadway under cloudy sky
+view of golden domed church
+a monument under the blue sky
+firecrackers in the sky
+view of fruit signage in the farm
+a dark clouds over shadowing the full moon
+view of the amazon river
+a big river swamp in a dense forest
+a blooming cherry blossom tree under a blue sky with white clouds
+a river waterfall cascading down the plunge basin
+flooded landscape with palm trees
+a blurry waterfall background
+waterfall in the mountains
+aerial footage of a city at night
+pond by small waterfall in forest
+aerial view of farmlands at the bay of lake
+rice terraces in the countryside
+a highway built across an agricultural area in the countryside
+gloomy morning in the countryside
+drone shot of an abandoned coliseum on a snowy mountain top
+boat sailing in the middle of ocean
+drone shot of the grass field
+natural landscape of mountain and sea with islets developed into a community
+aerial view of zaporizhia in ukraine
+aerial footage of a herd
+an aerial footage of a red sky
+grass and plants growing in the remains of an abandoned house
+view from hill on city
+aerial view on orthodox church
+aerial view of bay in croatia
+a footage of a frozen river
+overlooking view of a city at daylight
+view outside the cemetery
+clear sky with moon over meadow
+clouds over railway
+aerial footage of moving vehicles on the road at night
+aerial view of town and park
+top view of skyscrapers
+top view of the empire state building in manhattan
+top view of the central park in new york city
+sheep running in a grass field
+clear sky over factory
+smoke and fire in birds eye view
+view of a pathway with snow melting on its side
+ferry under bridge on river near city in malaysia
+mountain slopes covered in green vegetation
+panoramic view of a town surrounded by snow covered mountains
+aerial view of a palace
+top view of vehicles driving on the intersection
+a graveyard by a church in a mountain landscape
+a modern railway station in malaysia use for public transportation
+drone footage of amsterdam metro station
+train arriving at a station
+red vehicle driving on field
+close up view of flashing emergency vehicle lighting
+vehicle with fertilizer on field
+a highway built across an agricultural area in the countryside
+drone footage of motorcycles driving on country road between agricultural fields
+a road in the woods under fog
+footage of a car driving through a wheat field
+vehicle stops for an ambulance passing through city traffic
+emergency vehicle parked outside the casino
+zombies attacking a woman and a boy inside a car
+woman seating inside the car while chewing
+video of passengers riding a double decker bus during night
+traffic in london street at night
+elderly couple checking engine of automobile
+a green vintage automobile with an open hood parked in a parking area
+close up of a prototype automobile with exposed engine on the back seat of the car
+aerial view of road in forest
+train departing from station
+aerial view of a train passing by a bridge
+video of a train tracks
+video footage of a subway
+video of blinking traffic lights
+couple walking out on the subway
+time lapse of a subway tunnel
+monitor board inside the subway
+metro train at night
+zoom in video of a tram passing by city
+young man using laptop in the tram
+man reading a book at bus stop
+close up shot of a moving taxi
+night travel in london street on a public bus
+red bus in a rainy city
+flow of traffic in the city
+close up shot of a yellow taxi turning left
+two women calling for a taxi
+drone view of an illuminated bridge across a river
+policeman in police car talking on radio
+airplane taking off at night
+view through window in airplane
+an airplane in the sky
+helicopter landing on the street
+a pilot getting out of a helicopter
+a helicopter flying under blue sky
+boat sailing in the middle of the ocean
+girl playing with a toy boat
+silhouette of a boat on sea during golden hour
+a boat travelling around the lake
+road on mountain ridge
+ship sailing on danube river
+slow motion video of a ship water trail in the sea
+drone footage of a wreck ship on shore
+a white yacht traveling on a river and passing under the bridge
+female teenagers drinking champagne in the yacht
+video of yacht sailing in the ocean
+red combine harvester on road on field
+a woman sitting on a bicycle while using a mobile phone
+a woman sitting on a motorcycle looking around
+three teenagers fixing a bicycle
+a woman in a halloween costume posing on a motorcycle
+a parked motorcycle on a foggy roadside
+cable car near sea shore
+a truck travelling in the road
+footage of the road without any traffic
+a road sign
+love padlocks on a bridge
+camera moving at highway construction site
+vehicles driving on highway
+a motorbike on highway at timelapse mode
+point of view of a car driving through a tunnel
+time lapse of heavy traffic on an avenue
+ferry boat on city canal
+black vintage car in museum
+a zigzag road across a forest
+people crossing the road
+video of a kayak boat in a river
+a person paddling a wooden boat in a lake
+a car charging in the parking area
+cars parked on the road
+footage of the street with people and vehicle passing by in the rain
+traffic on busy city street
+a woman getting out of the car to walk with their dog
+yacht sailing through the ocean
+people in queue to military ship
+man wearing motorcycle helmet looking at the camera
+empty seats in the bus
+empty boat on the water
+cargo train traveling on the mountainside
+cruise ship in harbor
+counting down at traffic lights
+pressing the car ignition
+fire truck driving on the road
+a footage of a broken bicycle
+drone footage of an ambulance on the road
+slow motion footage of a racing car
+ship sailing on sea against sunset
+big cargo ship passing on the shore
+back view of man and woman walking on unpaved road
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_dimension.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_dimension.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f26fbf80daa8be879b25c527dfe583a422d8ccf9
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_dimension.txt
@@ -0,0 +1,946 @@
+In a still frame, a stop sign
+a toilet, frozen in time
+a laptop, frozen in time
+A tranquil tableau of alley
+A tranquil tableau of bar
+A tranquil tableau of barn
+A tranquil tableau of bathroom
+A tranquil tableau of bedroom
+A tranquil tableau of cliff
+In a still frame, courtyard
+In a still frame, gas station
+A tranquil tableau of house
+indoor gymnasium, frozen in time
+A tranquil tableau of indoor library
+A tranquil tableau of kitchen
+A tranquil tableau of palace
+In a still frame, parking lot
+In a still frame, phone booth
+A tranquil tableau of restaurant
+A tranquil tableau of tower
+A tranquil tableau of a bowl
+A tranquil tableau of an apple
+A tranquil tableau of a bench
+A tranquil tableau of a bed
+A tranquil tableau of a chair
+A tranquil tableau of a cup
+A tranquil tableau of a dining table
+In a still frame, a pear
+A tranquil tableau of a bunch of grapes
+A tranquil tableau of a bowl on the kitchen counter
+A tranquil tableau of a beautiful, handcrafted ceramic bowl
+A tranquil tableau of an antique bowl
+A tranquil tableau of an exquisite mahogany dining table
+A tranquil tableau of a wooden bench in the park
+A tranquil tableau of a beautiful wrought-iron bench surrounded by blooming flowers
+In a still frame, a park bench with a view of the lake
+A tranquil tableau of a vintage rocking chair was placed on the porch
+A tranquil tableau of the jail cell was small and dimly lit, with cold, steel bars
+A tranquil tableau of the phone booth was tucked away in a quiet alley
+a dilapidated phone booth stood as a relic of a bygone era on the sidewalk, frozen in time
+A tranquil tableau of the old red barn stood weathered and iconic against the backdrop of the countryside
+A tranquil tableau of a picturesque barn was painted a warm shade of red and nestled in a picturesque meadow
+In a still frame, within the desolate desert, an oasis unfolded, characterized by the stoic presence of palm trees and a motionless, glassy pool of water
+In a still frame, the Parthenon's majestic Doric columns stand in serene solitude atop the Acropolis, framed by the tranquil Athenian landscape
+In a still frame, the Temple of Hephaestus, with its timeless Doric grace, stands stoically against the backdrop of a quiet Athens
+In a still frame, the ornate Victorian streetlamp stands solemnly, adorned with intricate ironwork and stained glass panels
+A tranquil tableau of the Stonehenge presented itself as an enigmatic puzzle, each colossal stone meticulously placed against the backdrop of tranquility
+In a still frame, in the vast desert, an oasis nestled among dunes, featuring tall palm trees and an air of serenity
+static view on a desert scene with an oasis, palm trees, and a clear, calm pool of water
+A tranquil tableau of an ornate Victorian streetlamp standing on a cobblestone street corner, illuminating the empty night
+A tranquil tableau of a tranquil lakeside cabin nestled among tall pines, its reflection mirrored perfectly in the calm water
+In a still frame, a vintage gas lantern, adorned with intricate details, gracing a historic cobblestone square
+In a still frame, a tranquil Japanese tea ceremony room, with tatami mats, a delicate tea set, and a bonsai tree in the corner
+A tranquil tableau of the Parthenon stands resolute in its classical elegance, a timeless symbol of Athens' cultural legacy
+A tranquil tableau of in the heart of Plaka, the neoclassical architecture of the old city harmonizes with the ancient ruins
+A tranquil tableau of in the desolate beauty of the American Southwest, Chaco Canyon's ancient ruins whispered tales of an enigmatic civilization that once thrived amidst the arid landscapes
+A tranquil tableau of at the edge of the Arabian Desert, the ancient city of Petra beckoned with its enigmatic rock-carved façades
+In a still frame, amidst the cobblestone streets, an Art Nouveau lamppost stood tall
+A tranquil tableau of in the quaint village square, a traditional wrought-iron streetlamp featured delicate filigree patterns and amber-hued glass panels
+A tranquil tableau of the lampposts were adorned with Art Deco motifs, their geometric shapes and frosted glass creating a sense of vintage glamour
+In a still frame, in the picturesque square, a Gothic-style lamppost adorned with intricate stone carvings added a touch of medieval charm to the setting
+In a still frame, in the heart of the old city, a row of ornate lantern-style streetlamps bathed the narrow alleyway in a warm, welcoming light
+A tranquil tableau of in the heart of the Utah desert, a massive sandstone arch spanned the horizon
+A tranquil tableau of in the Arizona desert, a massive stone bridge arched across a rugged canyon
+A tranquil tableau of in the corner of the minimalist tea room, a bonsai tree added a touch of nature's beauty to the otherwise simple and elegant space
+In a still frame, amidst the hushed ambiance of the traditional tea room, a meticulously arranged tea set awaited, with porcelain cups, a bamboo whisk
+In a still frame, nestled in the Zen garden, a rustic teahouse featured tatami seating and a traditional charcoal brazier
+A tranquil tableau of a country estate's library featured elegant wooden shelves
+A tranquil tableau of beneath the shade of a solitary oak tree, an old wooden park bench sat patiently
+A tranquil tableau of beside a tranquil pond, a weeping willow tree draped its branches gracefully over the water's surface, creating a serene tableau of reflection and calm
+A tranquil tableau of in the Zen garden, a perfectly raked gravel path led to a serene rock garden
+In a still frame, a tranquil pond was fringed by weeping cherry trees, their blossoms drifting lazily onto the glassy surface
+In a still frame, within the historic library's reading room, rows of antique leather chairs and mahogany tables offered a serene haven for literary contemplation
+A tranquil tableau of a peaceful orchid garden showcased a variety of delicate blooms
+A tranquil tableau of in the serene courtyard, a centuries-old stone well stood as a symbol of a bygone era, its mossy stones bearing witness to the passage of time
+a bird and a cat
+a cat and a dog
+a dog and a horse
+a horse and a sheep
+a sheep and a cow
+a cow and an elephant
+an elephant and a bear
+a bear and a zebra
+a zebra and a giraffe
+a giraffe and a bird
+a chair and a couch
+a couch and a potted plant
+a potted plant and a tv
+a tv and a laptop
+a laptop and a remote
+a remote and a keyboard
+a keyboard and a cell phone
+a cell phone and a book
+a book and a clock
+a clock and a backpack
+a backpack and an umbrella
+an umbrella and a handbag
+a handbag and a tie
+a tie and a suitcase
+a suitcase and a vase
+a vase and scissors
+scissors and a teddy bear
+a teddy bear and a frisbee
+a frisbee and skis
+skis and a snowboard
+a snowboard and a sports ball
+a sports ball and a kite
+a kite and a baseball bat
+a baseball bat and a baseball glove
+a baseball glove and a skateboard
+a skateboard and a surfboard
+a surfboard and a tennis racket
+a tennis racket and a bottle
+a bottle and a chair
+an airplane and a train
+a train and a boat
+a boat and an airplane
+a bicycle and a car
+a car and a motorcycle
+a motorcycle and a bus
+a bus and a traffic light
+a traffic light and a fire hydrant
+a fire hydrant and a stop sign
+a stop sign and a parking meter
+a parking meter and a truck
+a truck and a bicycle
+a toilet and a hair drier
+a hair drier and a toothbrush
+a toothbrush and a sink
+a sink and a toilet
+a wine glass and a chair
+a cup and a couch
+a fork and a potted plant
+a knife and a tv
+a spoon and a laptop
+a bowl and a remote
+a banana and a keyboard
+an apple and a cell phone
+a sandwich and a book
+an orange and a clock
+broccoli and a backpack
+a carrot and an umbrella
+a hot dog and a handbag
+a pizza and a tie
+a donut and a suitcase
+a cake and a vase
+an oven and scissors
+a toaster and a teddy bear
+a microwave and a frisbee
+a refrigerator and skis
+a bicycle and an airplane
+a car and a train
+a motorcycle and a boat
+a person and a toilet
+a person and a hair drier
+a person and a toothbrush
+a person and a sink
+A person is riding a bike
+A person is marching
+A person is roller skating
+A person is tasting beer
+A person is clapping
+A person is drawing
+A person is petting animal (not cat)
+A person is eating watermelon
+A person is playing harp
+A person is wrestling
+A person is riding scooter
+A person is sweeping floor
+A person is skateboarding
+A person is dunking basketball
+A person is playing flute
+A person is stretching leg
+A person is tying tie
+A person is skydiving
+A person is shooting goal (soccer)
+A person is playing piano
+A person is finger snapping
+A person is canoeing or kayaking
+A person is laughing
+A person is digging
+A person is clay pottery making
+A person is shooting basketball
+A person is bending back
+A person is shaking hands
+A person is bandaging
+A person is push up
+A person is catching or throwing frisbee
+A person is playing trumpet
+A person is flying kite
+A person is filling eyebrows
+A person is shuffling cards
+A person is folding clothes
+A person is smoking
+A person is tai chi
+A person is squat
+A person is playing controller
+A person is throwing axe
+A person is giving or receiving award
+A person is air drumming
+A person is taking a shower
+A person is planting trees
+A person is sharpening knives
+A person is robot dancing
+A person is rock climbing
+A person is hula hooping
+A person is writing
+A person is bungee jumping
+A person is pushing cart
+A person is cleaning windows
+A person is cutting watermelon
+A person is cheerleading
+A person is washing hands
+A person is ironing
+A person is cutting nails
+A person is hugging
+A person is trimming or shaving beard
+A person is jogging
+A person is making bed
+A person is washing dishes
+A person is grooming dog
+A person is doing laundry
+A person is knitting
+A person is reading book
+A person is baby waking up
+A person is massaging legs
+A person is brushing teeth
+A person is crawling baby
+A person is motorcycling
+A person is driving car
+A person is sticking tongue out
+A person is shaking head
+A person is sword fighting
+A person is doing aerobics
+A person is strumming guitar
+A person is riding or walking with horse
+A person is archery
+A person is catching or throwing baseball
+A person is playing chess
+A person is rock scissors paper
+A person is using computer
+A person is arranging flowers
+A person is bending metal
+A person is ice skating
+A person is climbing a rope
+A person is crying
+A person is dancing ballet
+A person is getting a haircut
+A person is running on treadmill
+A person is kissing
+A person is counting money
+A person is barbequing
+A person is peeling apples
+A person is milking cow
+A person is shining shoes
+A person is making snowman
+A person is sailing
+a person swimming in ocean
+a person giving a presentation to a room full of colleagues
+a person washing the dishes
+a person eating a burger
+a person walking in the snowstorm
+a person drinking coffee in a cafe
+a person playing guitar
+a bicycle leaning against a tree
+a bicycle gliding through a snowy field
+a bicycle slowing down to stop
+a bicycle accelerating to gain speed
+a car stuck in traffic during rush hour
+a car turning a corner
+a car slowing down to stop
+a car accelerating to gain speed
+a motorcycle cruising along a coastal highway
+a motorcycle turning a corner
+a motorcycle slowing down to stop
+a motorcycle gliding through a snowy field
+a motorcycle accelerating to gain speed
+an airplane soaring through a clear blue sky
+an airplane taking off
+an airplane landing smoothly on a runway
+an airplane accelerating to gain speed
+a bus turning a corner
+a bus stuck in traffic during rush hour
+a bus accelerating to gain speed
+a train speeding down the tracks
+a train crossing over a tall bridge
+a train accelerating to gain speed
+a truck turning a corner
+a truck anchored in a tranquil bay
+a truck stuck in traffic during rush hour
+a truck slowing down to stop
+a truck accelerating to gain speed
+a boat sailing smoothly on a calm lake
+a boat slowing down to stop
+a boat accelerating to gain speed
+a bird soaring gracefully in the sky
+a bird building a nest from twigs and leaves
+a bird flying over a snowy forest
+a cat grooming itself meticulously with its tongue
+a cat playing in park
+a cat drinking water
+a cat running happily
+a dog enjoying a peaceful walk
+a dog playing in park
+a dog drinking water
+a dog running happily
+a horse bending down to drink water from a river
+a horse galloping across an open field
+a horse taking a peaceful walk
+a horse running to join a herd of its kind
+a sheep bending down to drink water from a river
+a sheep taking a peaceful walk
+a sheep running to join a herd of its kind
+a cow bending down to drink water from a river
+a cow chewing cud while resting in a tranquil barn
+a cow running to join a herd of its kind
+an elephant spraying itself with water using its trunk to cool down
+an elephant taking a peaceful walk
+an elephant running to join a herd of its kind
+a bear catching a salmon in its powerful jaws
+a bear sniffing the air for scents of food
+a bear climbing a tree
+a bear hunting for prey
+a zebra bending down to drink water from a river
+a zebra running to join a herd of its kind
+a zebra taking a peaceful walk
+a giraffe bending down to drink water from a river
+a giraffe taking a peaceful walk
+a giraffe running to join a herd of its kind
+a person
+a bicycle
+a car
+a motorcycle
+an airplane
+a bus
+a train
+a truck
+a boat
+a traffic light
+a fire hydrant
+a stop sign
+a parking meter
+a bench
+a bird
+a cat
+a dog
+a horse
+a sheep
+a cow
+an elephant
+a bear
+a zebra
+a giraffe
+a backpack
+an umbrella
+a handbag
+a tie
+a suitcase
+a frisbee
+skis
+a snowboard
+a sports ball
+a kite
+a baseball bat
+a baseball glove
+a skateboard
+a surfboard
+a tennis racket
+a bottle
+a wine glass
+a cup
+a fork
+a knife
+a spoon
+a bowl
+a banana
+an apple
+a sandwich
+an orange
+broccoli
+a carrot
+a hot dog
+a pizza
+a donut
+a cake
+a chair
+a couch
+a potted plant
+a bed
+a dining table
+a toilet
+a tv
+a laptop
+a remote
+a keyboard
+a cell phone
+a microwave
+an oven
+a toaster
+a sink
+a refrigerator
+a book
+a clock
+a vase
+scissors
+a teddy bear
+a hair drier
+a toothbrush
+a red bicycle
+a green bicycle
+a blue bicycle
+a yellow bicycle
+an orange bicycle
+a purple bicycle
+a pink bicycle
+a black bicycle
+a white bicycle
+a red car
+a green car
+a blue car
+a yellow car
+an orange car
+a purple car
+a pink car
+a black car
+a white car
+a red bird
+a green bird
+a blue bird
+a yellow bird
+an orange bird
+a purple bird
+a pink bird
+a black bird
+a white bird
+a black cat
+a white cat
+an orange cat
+a yellow cat
+a red umbrella
+a green umbrella
+a blue umbrella
+a yellow umbrella
+an orange umbrella
+a purple umbrella
+a pink umbrella
+a black umbrella
+a white umbrella
+a red suitcase
+a green suitcase
+a blue suitcase
+a yellow suitcase
+an orange suitcase
+a purple suitcase
+a pink suitcase
+a black suitcase
+a white suitcase
+a red bowl
+a green bowl
+a blue bowl
+a yellow bowl
+an orange bowl
+a purple bowl
+a pink bowl
+a black bowl
+a white bowl
+a red chair
+a green chair
+a blue chair
+a yellow chair
+an orange chair
+a purple chair
+a pink chair
+a black chair
+a white chair
+a red clock
+a green clock
+a blue clock
+a yellow clock
+an orange clock
+a purple clock
+a pink clock
+a black clock
+a white clock
+a red vase
+a green vase
+a blue vase
+a yellow vase
+an orange vase
+a purple vase
+a pink vase
+a black vase
+a white vase
+A beautiful coastal beach in spring, waves lapping on sand, Van Gogh style
+A beautiful coastal beach in spring, waves lapping on sand, oil painting
+A beautiful coastal beach in spring, waves lapping on sand by Hokusai, in the style of Ukiyo
+A beautiful coastal beach in spring, waves lapping on sand, black and white
+A beautiful coastal beach in spring, waves lapping on sand, pixel art
+A beautiful coastal beach in spring, waves lapping on sand, in cyberpunk style
+A beautiful coastal beach in spring, waves lapping on sand, animated style
+A beautiful coastal beach in spring, waves lapping on sand, watercolor painting
+A beautiful coastal beach in spring, waves lapping on sand, surrealism style
+The bund Shanghai, Van Gogh style
+The bund Shanghai, oil painting
+The bund Shanghai by Hokusai, in the style of Ukiyo
+The bund Shanghai, black and white
+The bund Shanghai, pixel art
+The bund Shanghai, in cyberpunk style
+The bund Shanghai, animated style
+The bund Shanghai, watercolor painting
+The bund Shanghai, surrealism style
+a shark is swimming in the ocean, Van Gogh style
+a shark is swimming in the ocean, oil painting
+a shark is swimming in the ocean by Hokusai, in the style of Ukiyo
+a shark is swimming in the ocean, black and white
+a shark is swimming in the ocean, pixel art
+a shark is swimming in the ocean, in cyberpunk style
+a shark is swimming in the ocean, animated style
+a shark is swimming in the ocean, watercolor painting
+a shark is swimming in the ocean, surrealism style
+A panda drinking coffee in a cafe in Paris, Van Gogh style
+A panda drinking coffee in a cafe in Paris, oil painting
+A panda drinking coffee in a cafe in Paris by Hokusai, in the style of Ukiyo
+A panda drinking coffee in a cafe in Paris, black and white
+A panda drinking coffee in a cafe in Paris, pixel art
+A panda drinking coffee in a cafe in Paris, in cyberpunk style
+A panda drinking coffee in a cafe in Paris, animated style
+A panda drinking coffee in a cafe in Paris, watercolor painting
+A panda drinking coffee in a cafe in Paris, surrealism style
+A cute happy Corgi playing in park, sunset, Van Gogh style
+A cute happy Corgi playing in park, sunset, oil painting
+A cute happy Corgi playing in park, sunset by Hokusai, in the style of Ukiyo
+A cute happy Corgi playing in park, sunset, black and white
+A cute happy Corgi playing in park, sunset, pixel art
+A cute happy Corgi playing in park, sunset, in cyberpunk style
+A cute happy Corgi playing in park, sunset, animated style
+A cute happy Corgi playing in park, sunset, watercolor painting
+A cute happy Corgi playing in park, sunset, surrealism style
+Gwen Stacy reading a book, Van Gogh style
+Gwen Stacy reading a book, oil painting
+Gwen Stacy reading a book by Hokusai, in the style of Ukiyo
+Gwen Stacy reading a book, black and white
+Gwen Stacy reading a book, pixel art
+Gwen Stacy reading a book, in cyberpunk style
+Gwen Stacy reading a book, animated style
+Gwen Stacy reading a book, watercolor painting
+Gwen Stacy reading a book, surrealism style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, Van Gogh style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, oil painting
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background by Hokusai, in the style of Ukiyo
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, black and white
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pixel art
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, in cyberpunk style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, animated style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, watercolor painting
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, surrealism style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, Van Gogh style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, oil painting
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas by Hokusai, in the style of Ukiyo
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, black and white
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pixel art
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, in cyberpunk style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, animated style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, watercolor painting
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, surrealism style
+An astronaut flying in space, Van Gogh style
+An astronaut flying in space, oil painting
+An astronaut flying in space by Hokusai, in the style of Ukiyo
+An astronaut flying in space, black and white
+An astronaut flying in space, pixel art
+An astronaut flying in space, in cyberpunk style
+An astronaut flying in space, animated style
+An astronaut flying in space, watercolor painting
+An astronaut flying in space, surrealism style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, Van Gogh style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, oil painting
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks by Hokusai, in the style of Ukiyo
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, black and white
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pixel art
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, in cyberpunk style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, animated style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, watercolor painting
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, surrealism style
+A beautiful coastal beach in spring, waves lapping on sand, in super slow motion
+A beautiful coastal beach in spring, waves lapping on sand, zoom in
+A beautiful coastal beach in spring, waves lapping on sand, zoom out
+A beautiful coastal beach in spring, waves lapping on sand, pan left
+A beautiful coastal beach in spring, waves lapping on sand, pan right
+A beautiful coastal beach in spring, waves lapping on sand, tilt up
+A beautiful coastal beach in spring, waves lapping on sand, tilt down
+A beautiful coastal beach in spring, waves lapping on sand, with an intense shaking effect
+A beautiful coastal beach in spring, waves lapping on sand, featuring a steady and smooth perspective
+A beautiful coastal beach in spring, waves lapping on sand, racking focus
+The bund Shanghai, in super slow motion
+The bund Shanghai, zoom in
+The bund Shanghai, zoom out
+The bund Shanghai, pan left
+The bund Shanghai, pan right
+The bund Shanghai, tilt up
+The bund Shanghai, tilt down
+The bund Shanghai, with an intense shaking effect
+The bund Shanghai, featuring a steady and smooth perspective
+The bund Shanghai, racking focus
+a shark is swimming in the ocean, in super slow motion
+a shark is swimming in the ocean, zoom in
+a shark is swimming in the ocean, zoom out
+a shark is swimming in the ocean, pan left
+a shark is swimming in the ocean, pan right
+a shark is swimming in the ocean, tilt up
+a shark is swimming in the ocean, tilt down
+a shark is swimming in the ocean, with an intense shaking effect
+a shark is swimming in the ocean, featuring a steady and smooth perspective
+a shark is swimming in the ocean, racking focus
+A panda drinking coffee in a cafe in Paris, in super slow motion
+A panda drinking coffee in a cafe in Paris, zoom in
+A panda drinking coffee in a cafe in Paris, zoom out
+A panda drinking coffee in a cafe in Paris, pan left
+A panda drinking coffee in a cafe in Paris, pan right
+A panda drinking coffee in a cafe in Paris, tilt up
+A panda drinking coffee in a cafe in Paris, tilt down
+A panda drinking coffee in a cafe in Paris, with an intense shaking effect
+A panda drinking coffee in a cafe in Paris, featuring a steady and smooth perspective
+A panda drinking coffee in a cafe in Paris, racking focus
+A cute happy Corgi playing in park, sunset, in super slow motion
+A cute happy Corgi playing in park, sunset, zoom in
+A cute happy Corgi playing in park, sunset, zoom out
+A cute happy Corgi playing in park, sunset, pan left
+A cute happy Corgi playing in park, sunset, pan right
+A cute happy Corgi playing in park, sunset, tilt up
+A cute happy Corgi playing in park, sunset, tilt down
+A cute happy Corgi playing in park, sunset, with an intense shaking effect
+A cute happy Corgi playing in park, sunset, featuring a steady and smooth perspective
+A cute happy Corgi playing in park, sunset, racking focus
+Gwen Stacy reading a book, in super slow motion
+Gwen Stacy reading a book, zoom in
+Gwen Stacy reading a book, zoom out
+Gwen Stacy reading a book, pan left
+Gwen Stacy reading a book, pan right
+Gwen Stacy reading a book, tilt up
+Gwen Stacy reading a book, tilt down
+Gwen Stacy reading a book, with an intense shaking effect
+Gwen Stacy reading a book, featuring a steady and smooth perspective
+Gwen Stacy reading a book, racking focus
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, in super slow motion
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, zoom in
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, zoom out
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pan left
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pan right
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, tilt up
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, tilt down
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, with an intense shaking effect
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, featuring a steady and smooth perspective
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, racking focus
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, in super slow motion
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, zoom in
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, zoom out
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pan left
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pan right
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, tilt up
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, tilt down
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, with an intense shaking effect
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, featuring a steady and smooth perspective
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, racking focus
+An astronaut flying in space, in super slow motion
+An astronaut flying in space, zoom in
+An astronaut flying in space, zoom out
+An astronaut flying in space, pan left
+An astronaut flying in space, pan right
+An astronaut flying in space, tilt up
+An astronaut flying in space, tilt down
+An astronaut flying in space, with an intense shaking effect
+An astronaut flying in space, featuring a steady and smooth perspective
+An astronaut flying in space, racking focus
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, in super slow motion
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, zoom in
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, zoom out
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pan left
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pan right
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, tilt up
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, tilt down
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, with an intense shaking effect
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, featuring a steady and smooth perspective
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, racking focus
+Close up of grapes on a rotating table.
+Turtle swimming in ocean.
+A storm trooper vacuuming the beach.
+A panda standing on a surfboard in the ocean in sunset.
+An astronaut feeding ducks on a sunny afternoon, reflection from the water.
+Two pandas discussing an academic paper.
+Sunset time lapse at the beach with moving clouds and colors in the sky.
+A fat rabbit wearing a purple robe walking through a fantasy landscape.
+A koala bear playing piano in the forest.
+An astronaut flying in space.
+Fireworks.
+An animated painting of fluffy white clouds moving in sky.
+Flying through fantasy landscapes.
+A bigfoot walking in the snowstorm.
+A squirrel eating a burger.
+A cat wearing sunglasses and working as a lifeguard at a pool.
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks.
+Splash of turquoise water in extreme slow motion, alpha channel included.
+an ice cream is melting on the table.
+a drone flying over a snowy forest.
+a shark is swimming in the ocean.
+Aerial panoramic video from a drone of a fantasy land.
+a teddy bear is swimming in the ocean.
+time lapse of sunrise on mars.
+golden fish swimming in the ocean.
+An artist brush painting on a canvas close up.
+A drone view of celebration with Christmas tree and fireworks, starry sky - background.
+happy dog wearing a yellow turtleneck, studio, portrait, facing camera, dark background
+Origami dancers in white paper, 3D render, on white background, studio shot, dancing modern dance.
+Campfire at night in a snowy forest with starry sky in the background.
+a fantasy landscape
+A 3D model of a 1800s victorian house.
+this is how I do makeup in the morning.
+A raccoon that looks like a turtle, digital art.
+Robot dancing in Times Square.
+Busy freeway at night.
+Balloon full of water exploding in extreme slow motion.
+An astronaut is riding a horse in the space in a photorealistic style.
+Macro slo-mo. Slow motion cropped closeup of roasted coffee beans falling into an empty bowl.
+Sewing machine, old sewing machine working.
+Motion colour drop in water, ink swirling in water, colourful ink in water, abstraction fancy dream cloud of ink.
+Few big purple plums rotating on the turntable. water drops appear on the skin during rotation. isolated on the white background. close-up. macro.
+Vampire makeup face of beautiful girl, red contact lenses.
+Ashtray full of butts on table, smoke flowing on black background, close-up
+Pacific coast, carmel by the sea ocean and waves.
+A teddy bear is playing drum kit in NYC Times Square.
+A corgi is playing drum kit.
+An Iron man is playing the electronic guitar, high electronic guitar.
+A raccoon is playing the electronic guitar.
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background by Vincent van Gogh
+A corgi's head depicted as an explosion of a nebula
+A fantasy landscape
+A future where humans have achieved teleportation technology
+A jellyfish floating through the ocean, with bioluminescent tentacles
+A Mars rover moving on Mars
+A panda drinking coffee in a cafe in Paris
+A space shuttle launching into orbit, with flames and smoke billowing out from the engines
+A steam train moving on a mountainside
+A super cool giant robot in Cyberpunk Beijing
+A tropical beach at sunrise, with palm trees and crystal-clear water in the foreground
+Cinematic shot of Van Gogh's selfie, Van Gogh style
+Gwen Stacy reading a book
+Iron Man flying in the sky
+The bund Shanghai, oil painting
+Yoda playing guitar on the stage
+A beautiful coastal beach in spring, waves lapping on sand by Hokusai, in the style of Ukiyo
+A beautiful coastal beach in spring, waves lapping on sand by Vincent van Gogh
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background
+A car moving slowly on an empty street, rainy evening
+A cat eating food out of a bowl
+A cat wearing sunglasses at a pool
+A confused panda in calculus class
+A cute fluffy panda eating Chinese food in a restaurant
+A cute happy Corgi playing in park, sunset
+A cute raccoon playing guitar in a boat on the ocean
+A happy fuzzy panda playing guitar nearby a campfire, snow mountain in the background
+A lightning striking atop of eiffel tower, dark clouds in the sky
+A modern art museum, with colorful paintings
+A panda cooking in the kitchen
+A panda playing on a swing set
+A polar bear is playing guitar
+A raccoon dressed in suit playing the trumpet, stage background
+A robot DJ is playing the turntable, in heavy raining futuristic tokyo rooftop cyberpunk night, sci-fi, fantasy
+A shark swimming in clear Caribbean ocean
+A super robot protecting city
+A teddy bear washing the dishes
+An epic tornado attacking above a glowing city at night, the tornado is made of smoke
+An oil painting of a couple in formal evening wear going home get caught in a heavy downpour with umbrellas
+Clown fish swimming through the coral reef
+Hyper-realistic spaceship landing on Mars
+The bund Shanghai, vibrant color
+Vincent van Gogh is painting in the room
+Yellow flowers swing in the wind
+alley
+amusement park
+aquarium
+arch
+art gallery
+bathroom
+bakery shop
+ballroom
+bar
+barn
+basement
+beach
+bedroom
+bridge
+botanical garden
+cafeteria
+campsite
+campus
+carrousel
+castle
+cemetery
+classroom
+cliff
+crosswalk
+construction site
+corridor
+courtyard
+desert
+downtown
+driveway
+farm
+food court
+football field
+forest road
+fountain
+gas station
+glacier
+golf course
+indoor gymnasium
+harbor
+highway
+hospital
+house
+iceberg
+industrial area
+jail cell
+junkyard
+kitchen
+indoor library
+lighthouse
+laboratory
+mansion
+marsh
+mountain
+indoor movie theater
+indoor museum
+music studio
+nursery
+ocean
+office
+palace
+parking lot
+pharmacy
+phone booth
+raceway
+restaurant
+river
+science museum
+shower
+ski slope
+sky
+skyscraper
+baseball stadium
+staircase
+street
+supermarket
+indoor swimming pool
+tower
+outdoor track
+train railway
+train station platform
+underwater coral reef
+valley
+volcano
+waterfall
+windmill
+a bicycle on the left of a car, front view
+a car on the right of a motorcycle, front view
+a motorcycle on the left of a bus, front view
+a bus on the right of a traffic light, front view
+a traffic light on the left of a fire hydrant, front view
+a fire hydrant on the right of a stop sign, front view
+a stop sign on the left of a parking meter, front view
+a parking meter on the right of a bench, front view
+a bench on the left of a truck, front view
+a truck on the right of a bicycle, front view
+a bird on the left of a cat, front view
+a cat on the right of a dog, front view
+a dog on the left of a horse, front view
+a horse on the right of a sheep, front view
+a sheep on the left of a cow, front view
+a cow on the right of an elephant, front view
+an elephant on the left of a bear, front view
+a bear on the right of a zebra, front view
+a zebra on the left of a giraffe, front view
+a giraffe on the right of a bird, front view
+a bottle on the left of a wine glass, front view
+a wine glass on the right of a cup, front view
+a cup on the left of a fork, front view
+a fork on the right of a knife, front view
+a knife on the left of a spoon, front view
+a spoon on the right of a bowl, front view
+a bowl on the left of a bottle, front view
+a potted plant on the left of a remote, front view
+a remote on the right of a clock, front view
+a clock on the left of a vase, front view
+a vase on the right of scissors, front view
+scissors on the left of a teddy bear, front view
+a teddy bear on the right of a potted plant, front view
+a frisbee on the left of a sports ball, front view
+a sports ball on the right of a baseball bat, front view
+a baseball bat on the left of a baseball glove, front view
+a baseball glove on the right of a tennis racket, front view
+a tennis racket on the left of a frisbee, front view
+a toilet on the left of a hair drier, front view
+a hair drier on the right of a toothbrush, front view
+a toothbrush on the left of a sink, front view
+a sink on the right of a toilet, front view
+a chair on the left of a couch, front view
+a couch on the right of a bed, front view
+a bed on the left of a tv, front view
+a tv on the right of a dining table, front view
+a dining table on the left of a chair, front view
+an airplane on the left of a train, front view
+a train on the right of a boat, front view
+a boat on the left of an airplane, front view
+an oven on the top of a toaster, front view
+an oven on the bottom of a toaster, front view
+a toaster on the top of a microwave, front view
+a toaster on the bottom of a microwave, front view
+a microwave on the top of an oven, front view
+a microwave on the bottom of an oven, front view
+a banana on the top of an apple, front view
+a banana on the bottom of an apple, front view
+an apple on the top of a sandwich, front view
+an apple on the bottom of a sandwich, front view
+a sandwich on the top of an orange, front view
+a sandwich on the bottom of an orange, front view
+an orange on the top of a carrot, front view
+an orange on the bottom of a carrot, front view
+a carrot on the top of a hot dog, front view
+a carrot on the bottom of a hot dog, front view
+a hot dog on the top of a pizza, front view
+a hot dog on the bottom of a pizza, front view
+a pizza on the top of a donut, front view
+a pizza on the bottom of a donut, front view
+a donut on the top of broccoli, front view
+a donut on the bottom of broccoli, front view
+broccoli on the top of a banana, front view
+broccoli on the bottom of a banana, front view
+skis on the top of a snowboard, front view
+skis on the bottom of a snowboard, front view
+a snowboard on the top of a kite, front view
+a snowboard on the bottom of a kite, front view
+a kite on the top of a skateboard, front view
+a kite on the bottom of a skateboard, front view
+a skateboard on the top of a surfboard, front view
+a skateboard on the bottom of a surfboard, front view
+a surfboard on the top of skis, front view
+a surfboard on the bottom of skis, front view
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_i2v.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_i2v.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b09ba72be2af5927f7678535b9a0f9f3866a8a8a
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/all_i2v.txt
@@ -0,0 +1,1118 @@
+a close up of a blue and orange liquid{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+a close up of a blue and orange liquid, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a blue and orange liquid.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+A black and white abstract video featuring mesmerizing bubbles, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A black and white abstract video featuring mesmerizing bubbles.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a blue and white smoke is swirly in the dark, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue and white smoke is swirly in the dark.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a close-up view of a sea fan in the water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a sea fan in the water.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a visually captivating abstract video, rich in color, set against a dramatic black background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a visually captivating abstract video, rich in color, set against a dramatic black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a purple and yellow abstract painting with a black background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a purple and yellow abstract painting with a black background.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a dynamic video of a blurry neon light in the dark, radiating captivating colors, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dynamic video of a blurry neon light in the dark, radiating captivating colors.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+a view of a star trail in the night sky, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a star trail in the night sky.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+an aerial view of a small town on the edge of the ocean, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a small town on the edge of the ocean.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+Colorful buildings on the seaside cliffs, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Colorful buildings on the seaside cliffs.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a bunch of houses that are on a hillside, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of houses that are on a hillside.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+a building that is sitting on the side of a pond, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a building that is sitting on the side of a pond.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+an aerial view of a busy city with a bridge in the background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a busy city with a bridge in the background.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a bridge that is over a body of water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is over a body of water.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a pile of wood sitting next to a log house, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pile of wood sitting next to a log house.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+a view of a snowy mountain side with many buildings, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a snowy mountain side with many buildings.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+san francisco skyline at sunset, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/san francisco skyline at sunset.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+a castle on top of a hill covered in snow, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a castle on top of a hill covered in snow.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+an aerial view of big ben and the houses of parliament in london, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of big ben and the houses of parliament in london.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+a beach with a lot of buildings on the side of a cliff, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beach with a lot of buildings on the side of a cliff.jpg", "mask_strategy": "0"}
+an alley way in an old european city{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+an alley way in an old european city, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alley way in an old european city.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the golden gate bridge in san franscisco is lit up by the setting sun, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the golden gate bridge in san franscisco is lit up by the setting sun.jpg", "mask_strategy": "0"}
+the great wall of china in autumn{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the great wall of china in autumn, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the great wall of china in autumn.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+the town of hallstatt is surrounded by mountains and water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the town of hallstatt is surrounded by mountains and water.jpg", "mask_strategy": "0"}
+tokyo skyline at night{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+tokyo skyline at night, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tokyo skyline at night.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+a church sits on top of a hill under a cloudy sky, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a church sits on top of a hill under a cloudy sky.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+the parthenon in acropolis, greece, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the parthenon in acropolis, greece.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+a large crowd of people walking in a shopping mall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large crowd of people walking in a shopping mall.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+the pyramids of giza, egypt, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the pyramids of giza, egypt.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a stage door painted with a star on the side of a brick wall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stage door painted with a star on the side of a brick wall.jpg", "mask_strategy": "0"}
+a light house on the edge of the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+a light house on the edge of the water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a light house on the edge of the water.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+an asian city street at night with people and bicycles, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an asian city street at night with people and bicycles.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a couple of wooden benches in the middle of a street, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of wooden benches in the middle of a street.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a pagoda sits on top of a mountain in japan, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pagoda sits on top of a mountain in japan.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a red bus driving down a snowy street at night, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a snow covered street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a snow covered street, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow covered street.jpg", "mask_strategy": "0"}
+a house with snow on the ground{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+a house with snow on the ground, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a house with snow on the ground.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+cars parked on the side of the road during a snowstorm, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars parked on the side of the road during a snowstorm.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a group of statues on the side of a building, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of statues on the side of a building.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+a city street at night during a snow storm, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street at night during a snow storm.jpg", "mask_strategy": "0"}
+tower bridge in london{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+tower bridge in london, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tower bridge in london.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+chinese pagoda in the middle of a snowy day, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/chinese pagoda in the middle of a snowy day.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a dark alleyway with a bus driving down it, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dark alleyway with a bus driving down it.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+a monastery sits on top of a cliff in bhutan, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monastery sits on top of a cliff in bhutan.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+the dome of the rock in jerusalem, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the dome of the rock in jerusalem.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+an aerial view of a futuristic building on a cliff overlooking a body of water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a futuristic building on a cliff overlooking a body of water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a reflection of a city with buildings in the water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a reflection of a city with buildings in the water.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a bar with chairs and a television on the wall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bar with chairs and a television on the wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with lots of books on a wall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with lots of books on a wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a living room filled with furniture next to a stone wall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room filled with furniture next to a stone wall.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a table and chairs in a room with sunlight coming through the window, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with sunlight coming through the window.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+a room filled with lots of shelves filled with books, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with lots of shelves filled with books.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+an art gallery with paintings on the walls, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an art gallery with paintings on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a room with a lot of pictures on the walls, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a lot of pictures on the walls.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a painting of a cloudy sky next to an easel, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a painting of a cloudy sky next to an easel.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a living room with a christmas tree and a rocking chair, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a christmas tree and a rocking chair.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a kitchen with a sink and a lot of glasses on the counter, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kitchen with a sink and a lot of glasses on the counter.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a wooden table in front of a brick wall with bottles on the wall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a wooden table in front of a brick wall with bottles on the wall.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+a room filled with paintings and statues, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with paintings and statues.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+an outdoor dining area surrounded by plants and a brick walkway, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an outdoor dining area surrounded by plants and a brick walkway.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a room filled with books and teddy bears, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room filled with books and teddy bears.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a table and chairs in a room with a plant in the corner, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table and chairs in a room with a plant in the corner.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a living room with a couch, table, and a window, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with a couch, table, and a window.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a modern living room with wood floors and a tv, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a modern living room with wood floors and a tv.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a room with a desk and a chair in it, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a room with a desk and a chair in it.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a building, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a building.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a chair in a room next to some drawings, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chair in a room next to some drawings.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+a living room with hardwood floors and a white couch, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a living room with hardwood floors and a white couch.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+two people in a canoe on a lake with mountains in the background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people in a canoe on a lake with mountains in the background.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+an aerial view of a snowy road in a forest, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a snowy road in a forest.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a view of a waterfall from a distance, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a view of a waterfall from a distance.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a valley, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a valley.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a group of islands in the middle of a lake, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a group of islands in the middle of a lake.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+an aerial view of a rocky beach in indonesia, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a rocky beach in indonesia.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+fireworks in the night sky over a city, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fireworks in the night sky over a city.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a large wave crashes into a lighthouse on a stormy day, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes into a lighthouse on a stormy day.jpg", "mask_strategy": "0"}
+a mountain range with a sky background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a mountain range with a sky background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with a sky background.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a large bonfire is burning in the night sky, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large bonfire is burning in the night sky.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a close-up view of the flames of a fireplace, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of the flames of a fireplace.jpg", "mask_strategy": "0"}
+a farm in the middle of the day{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a farm in the middle of the day, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a farm in the middle of the day.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a flock of birds flying over a tree at sunset, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a flock of birds flying over a tree at sunset.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a captivating scene featuring a spiral galaxy shining brilliantly in the night sky, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a captivating scene featuring a spiral galaxy shining brilliantly in the night sky.jpg", "mask_strategy": "0"}
+a mountain with snow on it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a mountain with snow on it, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain with snow on it.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a bridge that is in the middle of a river, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bridge that is in the middle of a river.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a group of people standing on top of a green hill, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people standing on top of a green hill.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a sandy beach with a wooden pier in the water, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with a wooden pier in the water.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a lake surrounded by mountains and flowers, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lake surrounded by mountains and flowers.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+a hot-air balloon flying over a desert landscape, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hot-air balloon flying over a desert landscape.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+several hot air balloons flying over a city, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/several hot air balloons flying over a city.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a group of hot air balloons flying over a field, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of hot air balloons flying over a field.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+a large wave crashes over a rocky cliff, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave crashes over a rocky cliff.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+the sun is setting over a lake in the mountains, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is setting over a lake in the mountains.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+a mountain range with snow on the ground, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain range with snow on the ground.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+sun rays shining through clouds over a lake, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/sun rays shining through clouds over a lake.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a boat sits on the shore of a lake with mt fuji in the background, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a boat sits on the shore of a lake with mt fuji in the background.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+a foggy road with trees in the distance, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy road with trees in the distance.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+two swans swimming on a lake in the fog, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two swans swimming on a lake in the fog.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+the sun is shining through the trees near a waterfall, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the sun is shining through the trees near a waterfall.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+a sandy beach with palm trees on the shore, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sandy beach with palm trees on the shore.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+an aerial view of a body of water and a beach, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a body of water and a beach.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy field that has trees in the grass, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy field that has trees in the grass.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a foggy landscape with trees and hills in the distance, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a foggy landscape with trees and hills in the distance.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a large wave in the ocean with a lot of spray coming from it, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large wave in the ocean with a lot of spray coming from it.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a waterfall is shown in the middle of a lush green hillside, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a waterfall is shown in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+an aerial view of a curvy road in the middle of a forest, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an aerial view of a curvy road in the middle of a forest.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a mountain covered in snow with evergreen trees, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a mountain covered in snow with evergreen trees.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a very large waterfall in the middle of the day, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a very large waterfall in the middle of the day.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera pans left{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera pans right{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera tilts up{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera tilts down{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera zooms in{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera zooms out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a large waterfall in the middle of a lush green hillside, camera static{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large waterfall in the middle of a lush green hillside.jpg", "mask_strategy": "0"}
+a brown bear in the water with a fish in its mouth{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a brown bear in the water with a fish in its mouth.jpg", "mask_strategy": "0"}
+a close-up of a hippopotamus eating grass in a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up of a hippopotamus eating grass in a field.jpg", "mask_strategy": "0"}
+a sea turtle swimming in the ocean under the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sea turtle swimming in the ocean under the water.jpg", "mask_strategy": "0"}
+two bees are flying over a lavender plant{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two bees are flying over a lavender plant.jpg", "mask_strategy": "0"}
+the otter is standing in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the otter is standing in the water.jpg", "mask_strategy": "0"}
+a dog carrying a soccer ball in its mouth{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dog carrying a soccer ball in its mouth.jpg", "mask_strategy": "0"}
+an eagle is flying over a mountain with trees in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an eagle is flying over a mountain with trees in the background.jpg", "mask_strategy": "0"}
+a couple of horses are running in the dirt{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of horses are running in the dirt.jpg", "mask_strategy": "0"}
+a highland cow with long horns standing in a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a highland cow with long horns standing in a field.jpg", "mask_strategy": "0"}
+a monkey is holding a banana in its mouth{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monkey is holding a banana in its mouth.jpg", "mask_strategy": "0"}
+a large rhino grazing in the grass near a bush{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large rhino grazing in the grass near a bush.jpg", "mask_strategy": "0"}
+a butterfly sits on top of a purple flower{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a butterfly sits on top of a purple flower.jpg", "mask_strategy": "0"}
+an alligator is covered in green plants in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an alligator is covered in green plants in the water.jpg", "mask_strategy": "0"}
+a red panda eating bamboo in a zoo{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red panda eating bamboo in a zoo.jpg", "mask_strategy": "0"}
+a monochromatic video capturing a cat's gaze into the camera{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a monochromatic video capturing a cat's gaze into the camera.jpg", "mask_strategy": "0"}
+a frog sitting on top of water lily leaves{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a frog sitting on top of water lily leaves.jpg", "mask_strategy": "0"}
+a lion is roaring in the wild{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lion is roaring in the wild.jpg", "mask_strategy": "0"}
+a seagull is flying towards a person's hand{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a seagull is flying towards a person's hand.jpg", "mask_strategy": "0"}
+a yellow and white jellyfish is floating in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a yellow and white jellyfish is floating in the ocean.jpg", "mask_strategy": "0"}
+a group of jellyfish swimming in an aquarium{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of jellyfish swimming in an aquarium.jpg", "mask_strategy": "0"}
+a clown fish hiding in a purple anemone{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a clown fish hiding in a purple anemone.jpg", "mask_strategy": "0"}
+a snake sitting on the ground next to a bowl{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snake sitting on the ground next to a bowl.jpg", "mask_strategy": "0"}
+a brown and white cow eating hay{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a brown and white cow eating hay.jpg", "mask_strategy": "0"}
+a seal swimming in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a seal swimming in the water.jpg", "mask_strategy": "0"}
+a panda bear is eating a piece of bamboo{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a panda bear is eating a piece of bamboo.jpg", "mask_strategy": "0"}
+a small bird sits on a moss covered branch{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a small bird sits on a moss covered branch.jpg", "mask_strategy": "0"}
+a bird with a fish in its beak flying over a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bird with a fish in its beak flying over a field.jpg", "mask_strategy": "0"}
+a large flock of birds flying in the sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large flock of birds flying in the sky.jpg", "mask_strategy": "0"}
+a bald eagle flying over a tree filled forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bald eagle flying over a tree filled forest.jpg", "mask_strategy": "0"}
+a giraffe walking in a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a giraffe walking in a field.jpg", "mask_strategy": "0"}
+a lioness yawning in a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a lioness yawning in a field.jpg", "mask_strategy": "0"}
+a little crab scurried on the sandy beach{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a little crab scurried on the sandy beach.jpg", "mask_strategy": "0"}
+a warthog is walking in the grass{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a warthog is walking in the grass.jpg", "mask_strategy": "0"}
+a penguin walking on a beach near the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a penguin walking on a beach near the water.jpg", "mask_strategy": "0"}
+a tiger walking through a wooded area{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a tiger walking through a wooded area.jpg", "mask_strategy": "0"}
+a tiger walking on a dirt path in the woods{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a tiger walking on a dirt path in the woods.jpg", "mask_strategy": "0"}
+a small monkey holding a piece of food in it's mouth{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a small monkey holding a piece of food in it's mouth.jpg", "mask_strategy": "0"}
+a squirrel sitting on the ground eating a piece of bread{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a squirrel sitting on the ground eating a piece of bread.jpg", "mask_strategy": "0"}
+a group of fish swimming over a coral reef{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of fish swimming over a coral reef.jpg", "mask_strategy": "0"}
+a toad is sitting on top of some moss{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a toad is sitting on top of some moss.jpg", "mask_strategy": "0"}
+a great white shark swimming in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a great white shark swimming in the ocean.jpg", "mask_strategy": "0"}
+a group of camels resting in the desert{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of camels resting in the desert.jpg", "mask_strategy": "0"}
+two sheep grazing in the grass next to a wooden bridge{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two sheep grazing in the grass next to a wooden bridge.jpg", "mask_strategy": "0"}
+an elephant walking through a forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an elephant walking through a forest.jpg", "mask_strategy": "0"}
+a white rooster standing in a grassy field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a white rooster standing in a grassy field.jpg", "mask_strategy": "0"}
+a zebra walking across a dirt road near a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a zebra walking across a dirt road near a field.jpg", "mask_strategy": "0"}
+cars are driving down a street lined with tall trees{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/cars are driving down a street lined with tall trees.jpg", "mask_strategy": "0"}
+the cars on the street are waiting for the traffic lights{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the cars on the street are waiting for the traffic lights.jpg", "mask_strategy": "0"}
+a bicycle leaning against a fence in the snow{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bicycle leaning against a fence in the snow.jpg", "mask_strategy": "0"}
+a blue fishing boat is navigating in the ocean next to a cruise ship{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue fishing boat is navigating in the ocean next to a cruise ship.jpg", "mask_strategy": "0"}
+a blue car driving down a dirt road near train tracks{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue car driving down a dirt road near train tracks.jpg", "mask_strategy": "0"}
+a sailboat is drifting on the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sailboat is drifting on the ocean.jpg", "mask_strategy": "0"}
+a couple of boats floating on a body of water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a couple of boats floating on a body of water.jpg", "mask_strategy": "0"}
+a city street with cars driving in the rain{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city street with cars driving in the rain.jpg", "mask_strategy": "0"}
+a red and white tram traveling down a snowy street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red and white tram traveling down a snowy street.jpg", "mask_strategy": "0"}
+a city bus driving down a snowy street at night{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a city bus driving down a snowy street at night.jpg", "mask_strategy": "0"}
+a green toy car is sitting on the ground{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a green toy car is sitting on the ground.jpg", "mask_strategy": "0"}
+a train traveling down tracks through the woods with leaves on the ground{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a train traveling down tracks through the woods with leaves on the ground.jpg", "mask_strategy": "0"}
+a man in a small boat fishing in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man in a small boat fishing in the ocean.jpg", "mask_strategy": "0"}
+an airplane is flying through the sky at sunset{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an airplane is flying through the sky at sunset.jpg", "mask_strategy": "0"}
+an old rusty car sits in the middle of a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an old rusty car sits in the middle of a field.jpg", "mask_strategy": "0"}
+a motorcycle driving down a road{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a motorcycle driving down a road.jpg", "mask_strategy": "0"}
+a blue train traveling through a lush green area{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a blue train traveling through a lush green area.jpg", "mask_strategy": "0"}
+a white car is swiftly driving on a dirt road near a bush, kicking up dust{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a white car is swiftly driving on a dirt road near a bush, kicking up dust.jpg", "mask_strategy": "0"}
+a large cargo ship sailing in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large cargo ship sailing in the water.jpg", "mask_strategy": "0"}
+the red Alfa sports car is speeding down the road{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/the red Alfa sports car is speeding down the road.jpg", "mask_strategy": "0"}
+two cars that have been involved in a violent collision{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two cars that have been involved in a violent collision.jpg", "mask_strategy": "0"}
+a red double decker bus driving down a street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a red double decker bus driving down a street.jpg", "mask_strategy": "0"}
+A red sports car driving through sand, kicking up a large amount of dust{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A red sports car driving through sand, kicking up a large amount of dust.jpg", "mask_strategy": "0"}
+a yellow toy car parked on a rock near the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a yellow toy car parked on a rock near the water.jpg", "mask_strategy": "0"}
+a space shuttle taking off into the sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a space shuttle taking off into the sky.jpg", "mask_strategy": "0"}
+a steam train traveling through the woods{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a steam train traveling through the woods.jpg", "mask_strategy": "0"}
+a group of buses parked at a bus station{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of buses parked at a bus station.jpg", "mask_strategy": "0"}
+A bunch of cars are driving on a highway{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A bunch of cars are driving on a highway.jpg", "mask_strategy": "0"}
+a white and blue airplane flying in the sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a white and blue airplane flying in the sky.jpg", "mask_strategy": "0"}
+A space station orbited above the Earth{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A space station orbited above the Earth.jpg", "mask_strategy": "0"}
+A yellow boat is cruising in front of a bridge{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A yellow boat is cruising in front of a bridge.jpg", "mask_strategy": "0"}
+tangerines in a metal bowl on a table{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/tangerines in a metal bowl on a table.jpg", "mask_strategy": "0"}
+a shadow of a hand reaching for a leaf{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a shadow of a hand reaching for a leaf.jpg", "mask_strategy": "0"}
+A teddy bear is climbing over a wooden fence{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A teddy bear is climbing over a wooden fence.jpg", "mask_strategy": "0"}
+a book on fire with flames coming out of it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a book on fire with flames coming out of it.jpg", "mask_strategy": "0"}
+a close-up of a pink rose with water droplets on it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up of a pink rose with water droplets on it.jpg", "mask_strategy": "0"}
+a person is cooking meat on a grill with flames{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person is cooking meat on a grill with flames.jpg", "mask_strategy": "0"}
+a snowman wearing a santa hat and scarf{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snowman wearing a santa hat and scarf.jpg", "mask_strategy": "0"}
+a person holding a sparkler in their hand{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person holding a sparkler in their hand.jpg", "mask_strategy": "0"}
+a teddy bear sitting on a moss covered ground{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a teddy bear sitting on a moss covered ground.jpg", "mask_strategy": "0"}
+a statue of a lion is sitting on a pedestal{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a statue of a lion is sitting on a pedestal.jpg", "mask_strategy": "0"}
+metal balls are suspended in the air{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/metal balls are suspended in the air.jpg", "mask_strategy": "0"}
+a close up of a bunch of green grapes{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a bunch of green grapes.jpg", "mask_strategy": "0"}
+a close-up view of a green plant with unfurled fronds{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a green plant with unfurled fronds.jpg", "mask_strategy": "0"}
+an orange mushroom sitting on top of a tree stump in the woods{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an orange mushroom sitting on top of a tree stump in the woods.jpg", "mask_strategy": "0"}
+a stack of pancakes covered in syrup and fruit{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stack of pancakes covered in syrup and fruit.jpg", "mask_strategy": "0"}
+a plate of spaghetti with spinach and tomatoes{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a plate of spaghetti with spinach and tomatoes.jpg", "mask_strategy": "0"}
+a pink lotus flower in the middle of a pond{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pink lotus flower in the middle of a pond.jpg", "mask_strategy": "0"}
+a person holding a sparkler in front of a sunset{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person holding a sparkler in front of a sunset.jpg", "mask_strategy": "0"}
+a pink rose is blooming in a garden{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pink rose is blooming in a garden.jpg", "mask_strategy": "0"}
+a snow man holding a lantern in the snow{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snow man holding a lantern in the snow.jpg", "mask_strategy": "0"}
+a stack of chocolate cookies with a bite taken out of it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a stack of chocolate cookies with a bite taken out of it.jpg", "mask_strategy": "0"}
+a white plate topped with eggs, toast, tomatoes, and a sausage{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a white plate topped with eggs, toast, tomatoes, and a sausage.jpg", "mask_strategy": "0"}
+a yellow water lily is floating in a pond{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a yellow water lily is floating in a pond.jpg", "mask_strategy": "0"}
+an astronaut floating in space with the earth in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an astronaut floating in space with the earth in the background.jpg", "mask_strategy": "0"}
+A little girl, lost in thought, is quietly sitting on the bus{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A little girl, lost in thought, is quietly sitting on the bus.jpg", "mask_strategy": "0"}
+a man holding a tray in front of a brick wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man holding a tray in front of a brick wall.jpg", "mask_strategy": "0"}
+an older man playing a saxophone on the street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older man playing a saxophone on the street.jpg", "mask_strategy": "0"}
+an older man jogging by the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older man jogging by the water.jpg", "mask_strategy": "0"}
+a person riding a skateboard on a concrete floor{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding a skateboard on a concrete floor.jpg", "mask_strategy": "0"}
+a woman with long black hair is posing for a picture{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman with long black hair is posing for a picture.jpg", "mask_strategy": "0"}
+a woman sitting on the ground in front of a guitar{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman sitting on the ground in front of a guitar.jpg", "mask_strategy": "0"}
+a little girl wearing a purple helmet riding a blue bike{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a little girl wearing a purple helmet riding a blue bike.jpg", "mask_strategy": "0"}
+a young boy is jumping in the mud{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a young boy is jumping in the mud.jpg", "mask_strategy": "0"}
+a man sitting in the driver's seat of a car wearing sunglasses{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man sitting in the driver's seat of a car wearing sunglasses.jpg", "mask_strategy": "0"}
+a little boy jumping in the air over a puddle of water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a little boy jumping in the air over a puddle of water.jpg", "mask_strategy": "0"}
+a woman with afro hair is smiling while wearing earphones{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman with afro hair is smiling while wearing earphones.jpg", "mask_strategy": "0"}
+a smiling woman with her hands clasped{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a smiling woman with her hands clasped.jpg", "mask_strategy": "0"}
+a young boy standing in a field with horses in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a young boy standing in a field with horses in the background.jpg", "mask_strategy": "0"}
+a young man is covered in colored powder{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a young man is covered in colored powder.jpg", "mask_strategy": "0"}
+a woman with curly hair is drinking a beer{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman with curly hair is drinking a beer.jpg", "mask_strategy": "0"}
+an old man standing in the middle of a field holding a bunch of plants{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an old man standing in the middle of a field holding a bunch of plants.jpg", "mask_strategy": "0"}
+a man standing on a boat with a net{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man standing on a boat with a net.jpg", "mask_strategy": "0"}
+a woman in a hat is putting salt into a basket{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman in a hat is putting salt into a basket.jpg", "mask_strategy": "0"}
+a young girl smelling a pink flower{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a young girl smelling a pink flower.jpg", "mask_strategy": "0"}
+a young boy leaning on a wooden pole{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a young boy leaning on a wooden pole.jpg", "mask_strategy": "0"}
+a man in a hat sitting in front of a brick oven{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man in a hat sitting in front of a brick oven.jpg", "mask_strategy": "0"}
+a man in a mexican outfit holding an acoustic guitar{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man in a mexican outfit holding an acoustic guitar.jpg", "mask_strategy": "0"}
+a snowboarder is in the air doing a trick{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a snowboarder is in the air doing a trick.jpg", "mask_strategy": "0"}
+a man riding a horse with a spear in his hand{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man riding a horse with a spear in his hand.jpg", "mask_strategy": "0"}
+a woman carrying a bundle of plants over their head{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman carrying a bundle of plants over their head.jpg", "mask_strategy": "0"}
+a person jumping in the air over a fence{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person jumping in the air over a fence.jpg", "mask_strategy": "0"}
+a man on a surfboard riding a wave in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man on a surfboard riding a wave in the ocean.jpg", "mask_strategy": "0"}
+a man sitting on steps playing an acoustic guitar{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man sitting on steps playing an acoustic guitar.jpg", "mask_strategy": "0"}
+a man swinging a tennis racquet at a tennis ball{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man swinging a tennis racquet at a tennis ball.jpg", "mask_strategy": "0"}
+a man riding a mountain bike on top of a rocky hill{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man riding a mountain bike on top of a rocky hill.jpg", "mask_strategy": "0"}
+a man riding a bike down a street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man riding a bike down a street.jpg", "mask_strategy": "0"}
+a man is running on a dirt road{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man is running on a dirt road.jpg", "mask_strategy": "0"}
+A man in a black suit and a sombrero, shouting loudly{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A man in a black suit and a sombrero, shouting loudly.jpg", "mask_strategy": "0"}
+a man standing on top of a sand dune in the desert{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man standing on top of a sand dune in the desert.jpg", "mask_strategy": "0"}
+a person riding a motorcycle down a road{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding a motorcycle down a road.jpg", "mask_strategy": "0"}
+a man standing on top of a mountain with a backpack{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man standing on top of a mountain with a backpack.jpg", "mask_strategy": "0"}
+a man with a skull face paint smoking a cigar and holding a guitar{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man with a skull face paint smoking a cigar and holding a guitar.jpg", "mask_strategy": "0"}
+a man in sunglasses laying on a wooden bench{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man in sunglasses laying on a wooden bench.jpg", "mask_strategy": "0"}
+an older woman sitting in a room with a cigarette in her hand{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older woman sitting in a room with a cigarette in her hand.jpg", "mask_strategy": "0"}
+a man sitting on the ground playing a musical instrument{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man sitting on the ground playing a musical instrument.jpg", "mask_strategy": "0"}
+a person riding a horse in a polo match{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding a horse in a polo match.jpg", "mask_strategy": "0"}
+a woman in a kimono holding an umbrella{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman in a kimono holding an umbrella.jpg", "mask_strategy": "0"}
+a person riding a dirt bike{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding a dirt bike.jpg", "mask_strategy": "0"}
+a person riding an atv on a dirt track{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding an atv on a dirt track.jpg", "mask_strategy": "0"}
+a person riding a wave on a surfboard{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person riding a wave on a surfboard.jpg", "mask_strategy": "0"}
+a woman in a wetsuit is swimming in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman in a wetsuit is swimming in the ocean.jpg", "mask_strategy": "0"}
+a man snorkling in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man snorkling in the ocean.jpg", "mask_strategy": "0"}
+a beautiful woman in a blue sari posing in front of a wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a beautiful woman in a blue sari posing in front of a wall.jpg", "mask_strategy": "0"}
+a woman wearing a shawl in front of a mountain{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman wearing a shawl in front of a mountain.jpg", "mask_strategy": "0"}
+a woman is making bread in an oven{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman is making bread in an oven.jpg", "mask_strategy": "0"}
+a woman smiles while holding a yellow flower{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman smiles while holding a yellow flower.jpg", "mask_strategy": "0"}
+A young boy is lifting a bundle of dry grass wrapped in waterproof fabric over his head{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A young boy is lifting a bundle of dry grass wrapped in waterproof fabric over his head.jpg", "mask_strategy": "0"}
+two people performing a sword fight in front of a forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people performing a sword fight in front of a forest.jpg", "mask_strategy": "0"}
+a woman in a colorful shirt is cooking food{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman in a colorful shirt is cooking food.jpg", "mask_strategy": "0"}
+an older woman is drinking a bottle of water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older woman is drinking a bottle of water.jpg", "mask_strategy": "0"}
+a smiling woman sitting at a table with food and drinks{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a smiling woman sitting at a table with food and drinks.jpg", "mask_strategy": "0"}
+a woman wearing a hijab reading a book on the beach{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman wearing a hijab reading a book on the beach.jpg", "mask_strategy": "0"}
+a woman wearing a headscarf is reaching for an olive tree{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman wearing a headscarf is reaching for an olive tree.jpg", "mask_strategy": "0"}
+a woman in a white dress jumping in the air in a field of pink flowers{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman in a white dress jumping in the air in a field of pink flowers.jpg", "mask_strategy": "0"}
+a woman wearing a conical hat sits on a boat{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman wearing a conical hat sits on a boat.jpg", "mask_strategy": "0"}
+an older woman sitting in front of an old building{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older woman sitting in front of an old building.jpg", "mask_strategy": "0"}
+a woman is praying in front of a buddhist temple{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman is praying in front of a buddhist temple.jpg", "mask_strategy": "0"}
+a woman with green hair smiling for the camera{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman with green hair smiling for the camera.jpg", "mask_strategy": "0"}
+A group of people in a yellow raft is rowing through turbulent waters{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A group of people in a yellow raft is rowing through turbulent waters.jpg", "mask_strategy": "0"}
+a man carrying a woman on his back in a field{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man carrying a woman on his back in a field.jpg", "mask_strategy": "0"}
+an indian police officer talking to an old woman{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an indian police officer talking to an old woman.jpg", "mask_strategy": "0"}
+two people scuba diving in the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two people scuba diving in the ocean.jpg", "mask_strategy": "0"}
+A man and woman dressed as sugar skulls in a field of flowers, sharing a loving gaze with each other{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A man and woman dressed as sugar skulls in a field of flowers, sharing a loving gaze with each other.jpg", "mask_strategy": "0"}
+a group of people watching a cow race{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people watching a cow race.jpg", "mask_strategy": "0"}
+a man and a child riding bumper cars in an amusement park{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man and a child riding bumper cars in an amusement park.jpg", "mask_strategy": "0"}
+a group of motorcyclists racing on a dirt track{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of motorcyclists racing on a dirt track.jpg", "mask_strategy": "0"}
+a man and a woman are boxing in a boxing ring{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man and a woman are boxing in a boxing ring.jpg", "mask_strategy": "0"}
+a man holding a baby in his arms{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man holding a baby in his arms.jpg", "mask_strategy": "0"}
+a man and a woman sitting on a bench playing instruments{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man and a woman sitting on a bench playing instruments.jpg", "mask_strategy": "0"}
+two men are standing next to each other with a bicycle{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two men are standing next to each other with a bicycle.jpg", "mask_strategy": "0"}
+a man and a boy sitting on a beach near the ocean{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man and a boy sitting on a beach near the ocean.jpg", "mask_strategy": "0"}
+two men in white clothing standing next to each other{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two men in white clothing standing next to each other.jpg", "mask_strategy": "0"}
+a group of men riding horses in a dusty arena{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of men riding horses in a dusty arena.jpg", "mask_strategy": "0"}
+a soccer player in a yellow and black shirt is chasing a soccer ball{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a soccer player in a yellow and black shirt is chasing a soccer ball.jpg", "mask_strategy": "0"}
+a group of women sitting on the steps of a building{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of women sitting on the steps of a building.jpg", "mask_strategy": "0"}
+a group of people gathered around a red checkered blanket{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people gathered around a red checkered blanket.jpg", "mask_strategy": "0"}
+a group of people in orange jumpsuits running along a river{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people in orange jumpsuits running along a river.jpg", "mask_strategy": "0"}
+a woman walking down a sidewalk with a bag{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman walking down a sidewalk with a bag.jpg", "mask_strategy": "0"}
+a busy street with cars and people on motorcycles{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a busy street with cars and people on motorcycles.jpg", "mask_strategy": "0"}
+a man in a mask is walking through a crowd of people{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man in a mask is walking through a crowd of people.jpg", "mask_strategy": "0"}
+a man and a woman walking under an umbrella next to a brick wall{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a man and a woman walking under an umbrella next to a brick wall.jpg", "mask_strategy": "0"}
+a group of people riding bikes down a street{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of people riding bikes down a street.jpg", "mask_strategy": "0"}
+An old person is holding a cup on the street, and people around are curiously looking at him{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/An old person is holding a cup on the street, and people around are curiously looking at him.jpg", "mask_strategy": "0"}
+two young girls playing with leaves in the woods{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two young girls playing with leaves in the woods.jpg", "mask_strategy": "0"}
+One person is riding on the back of a horse led by another person{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/One person is riding on the back of a horse led by another person.jpg", "mask_strategy": "0"}
+an older woman and a young girl are knitting together{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/an older woman and a young girl are knitting together.jpg", "mask_strategy": "0"}
+three geishas walking down the street in traditional clothing{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/three geishas walking down the street in traditional clothing.jpg", "mask_strategy": "0"}
+two men riding bikes down a road near a forest{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two men riding bikes down a road near a forest.jpg", "mask_strategy": "0"}
+two women carrying bowls on their heads{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two women carrying bowls on their heads.jpg", "mask_strategy": "0"}
+two women eating pizza at a restaurant{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two women eating pizza at a restaurant.jpg", "mask_strategy": "0"}
+two young women studying in a library{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two young women studying in a library.jpg", "mask_strategy": "0"}
+pink water lilies in a pond with leaves{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/pink water lilies in a pond with leaves.jpg", "mask_strategy": "0"}
+a group of succulents in a rock garden{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of succulents in a rock garden.jpg", "mask_strategy": "0"}
+a close up view of a bunch of snowdrop flowers{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up view of a bunch of snowdrop flowers.jpg", "mask_strategy": "0"}
+a close up of leaves with water droplets on them{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of leaves with water droplets on them.jpg", "mask_strategy": "0"}
+a close-up of a sea anemone in the water{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up of a sea anemone in the water.jpg", "mask_strategy": "0"}
+a plant with water droplets on it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a plant with water droplets on it.jpg", "mask_strategy": "0"}
+a group of cactus plants in the desert{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of cactus plants in the desert.jpg", "mask_strategy": "0"}
+a close-up view of a plant with spiky leaves{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a plant with spiky leaves.jpg", "mask_strategy": "0"}
+A budding and blossoming flower bud seedling{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A budding and blossoming flower bud seedling.jpg", "mask_strategy": "0"}
+a field of orange flowers near the ocean'{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a field of orange flowers near the ocean'.jpg", "mask_strategy": "0"}
+a close-up view of a bunch of pink flowers{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close-up view of a bunch of pink flowers.jpg", "mask_strategy": "0"}
+pink water lilies in a pond{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/pink water lilies in a pond.jpg", "mask_strategy": "0"}
+reeds blowing in the wind against a cloudy sky{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/reeds blowing in the wind against a cloudy sky.jpg", "mask_strategy": "0"}
+two tall cacti in the middle of the desert{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two tall cacti in the middle of the desert.jpg", "mask_strategy": "0"}
+a sea anemone on a coral reef{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a sea anemone on a coral reef.jpg", "mask_strategy": "0"}
+a dandelion blowing in the wind{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a dandelion blowing in the wind.jpg", "mask_strategy": "0"}
+A boiling pot cooking vegetables{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A boiling pot cooking vegetables.jpg", "mask_strategy": "0"}
+a woman stirring food in a pan on the stove{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a woman stirring food in a pan on the stove.jpg", "mask_strategy": "0"}
+two eggs are fried in a frying pan on the stove{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/two eggs are fried in a frying pan on the stove.jpg", "mask_strategy": "0"}
+fried onion rings in a basket{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/fried onion rings in a basket.jpg", "mask_strategy": "0"}
+a pot is sitting on top of a campfire{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pot is sitting on top of a campfire.jpg", "mask_strategy": "0"}
+a chef is preparing a dish with mushrooms on a wooden board{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a chef is preparing a dish with mushrooms on a wooden board.jpg", "mask_strategy": "0"}
+a hand holding a slice of pizza{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a hand holding a slice of pizza.jpg", "mask_strategy": "0"}
+A person is using tongs to pick up meat from a plate{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A person is using tongs to pick up meat from a plate.jpg", "mask_strategy": "0"}
+The meat is picked up from the grill with tongs{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/The meat is picked up from the grill with tongs.jpg", "mask_strategy": "0"}
+A person is whisking eggs, and the egg whites and yolks are gently streaming out{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A person is whisking eggs, and the egg whites and yolks are gently streaming out.jpg", "mask_strategy": "0"}
+a person is putting sauce on a burger{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person is putting sauce on a burger.jpg", "mask_strategy": "0"}
+A person is making dumplings{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A person is making dumplings.jpg", "mask_strategy": "0"}
+a pan filled with fried food{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a pan filled with fried food.jpg", "mask_strategy": "0"}
+Chopsticks are slowly picking up the buns from the plastic container{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Chopsticks are slowly picking up the buns from the plastic container.jpg", "mask_strategy": "0"}
+a basket of french fries in a fryer{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a basket of french fries in a fryer.jpg", "mask_strategy": "0"}
+a table with lobsters and drinks on it{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a table with lobsters and drinks on it.jpg", "mask_strategy": "0"}
+a person pouring coffee into a pot on a stove{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person pouring coffee into a pot on a stove.jpg", "mask_strategy": "0"}
+a kettle is sitting on top of a campfire{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a kettle is sitting on top of a campfire.jpg", "mask_strategy": "0"}
+Chopsticks are picking up noodles from the bowl{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/Chopsticks are picking up noodles from the bowl.jpg", "mask_strategy": "0"}
+a person is cooking eggs on an outdoor grill{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person is cooking eggs on an outdoor grill.jpg", "mask_strategy": "0"}
+a person is cooking food in a wok on a stove{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person is cooking food in a wok on a stove.jpg", "mask_strategy": "0"}
+a person is holding up a burger with his hands{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person is holding up a burger with his hands.jpg", "mask_strategy": "0"}
+A person is pouring water into a teacup{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/A person is pouring water into a teacup.jpg", "mask_strategy": "0"}
+a person pouring seasoning into a pot of food{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person pouring seasoning into a pot of food.jpg", "mask_strategy": "0"}
+a person holding a taco in their hand{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person holding a taco in their hand.jpg", "mask_strategy": "0"}
+a person slicing salmon on a cutting board{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person slicing salmon on a cutting board.jpg", "mask_strategy": "0"}
+a bunch of food is cooking on a grill over an open fire{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a bunch of food is cooking on a grill over an open fire.jpg", "mask_strategy": "0"}
+a close up of a piece of sushi on chopsticks{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a close up of a piece of sushi on chopsticks.jpg", "mask_strategy": "0"}
+a group of pots on a stove with flames in the background{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a group of pots on a stove with flames in the background.jpg", "mask_strategy": "0"}
+a person cooking vegetables in a pan on a stove{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person cooking vegetables in a pan on a stove.jpg", "mask_strategy": "0"}
+a large pot of soup filled with vegetables and meat{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a large pot of soup filled with vegetables and meat.jpg", "mask_strategy": "0"}
+a person holding chopsticks over a bowl of food{"reference_path": "/mnt/jfs-hdd/sora/data/vbench-i2v/crop/1-1/a person holding chopsticks over a bowl of food.jpg", "mask_strategy": "0"}
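Note on the prompt format above: each line of the vbench-i2v prompt file concatenates a free-text prompt with a JSON object carrying per-sample options (`reference_path`, `mask_strategy`), with no delimiter between the two. The minimal sketch below shows one way such lines could be split and read; the file name is hypothetical and the actual Open-Sora loader may parse this format differently.

```python
import json
from pathlib import Path


def parse_prompt_line(line: str):
    """Split '<prompt>{"reference_path": ..., "mask_strategy": ...}' into its parts.

    Assumes the appended metadata starts at the first '{' and that prompts
    themselves contain no braces, which holds for the lines above. The real
    Open-Sora loader may handle these lines differently.
    """
    line = line.rstrip("\n")
    brace = line.find("{")
    if brace == -1:
        return line, {}          # plain prompt with no metadata
    return line[:brace], json.loads(line[brace:])


# Example usage (hypothetical file path; substitute the actual prompt file).
for raw in Path("prompts_i2v.txt").read_text().splitlines():
    prompt, meta = parse_prompt_line(raw)
    print(prompt, meta.get("reference_path"), meta.get("mask_strategy"))
```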
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/animal.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/animal.txt
new file mode 100644
index 0000000000000000000000000000000000000000..775f4c4d6f5190ce860ab61358a3896e7762f104
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/animal.txt
@@ -0,0 +1,100 @@
+a black dog wearing halloween costume
+spider making a web
+bat eating fruits while hanging
+a snake crawling on a wooden flooring
+a close up video of a dragonfly
+macro shot of ladybug on green leaf plant
+chameleon eating ant
+a bee feeding on nectars
+bird nests on a tree captured with moving camera
+a squirrel eating nuts
+close up video of snail
+top view of a hermit crab crawling on a wooden surface
+cat licking another cat
+red dragonfly perched on green leaf
+close up view of a brown caterpillar crawling on green leaf
+ants eating dead spider
+an eagle on a tree branch
+a frog eating an ant
+white rabbit near the fence
+a gorilla eating a carrot
+close up of wolf
+a meerkat looking around
+a hyena in a zoo
+lemur eating grass leaves
+an owl being trained by a man
+a lizard on a bamboo
+brown chicken hunting for its food
+video of parrots perched on bird stand
+underwater footage of an octopus in a coral reef
+a cute pomeranian dog playing with a soccer ball
+white fox on rock
+close up footage of a horse figurine
+giraffe feeding on a tree in a savannah
+curious cat sitting and looking around
+hummingbird hawk moth flying near pink flowers
+close up of a scorpion on a rock
+close up on fish in net
+koala eating leaves from a branch
+a pod of dolphins swirling in the sea catching forage fish
+low angle view of a hawk perched on a tree branch
+a lion standing on wild grass
+deer grazing in the field
+elephant herd in a savanna
+close up on lobster under water
+hedgehog crossing road in forest
+a sheep eating yellow flowers from behind a wire fence
+twin sisters and a turtle
+a pig wallowing in mud
+flock of goose eating on the lake water
+cow in a field irritated with flies
+a close up shot of a fly
+cheetah lying on the grass
+close up of a lemur
+close up shot of a kangaroo itching in the sand
+a tortoise covered with algae
+turkey in cage
+a great blue heron bird in the lakeside
+crab with shell in aquarium
+a seagull walking on shore
+an american crocodile
+a tiger walking inside a cage
+alligator in the nature
+a raccoon climbing a tree
+wild rabbit in a green meadow
+group of ring tailed lemurs
+a clouded leopard on a tree branch
+duck grooming its feathers
+an african penguin walking on a beach
+a video of a peacock
+close up shot of a wild bear
+baby rhino plays with mom
+porcupine climbs tree branches
+close up of a natterjack toad on a rock
+a sleeping orangutan
+mother whale swimming with babies
+a bear wearing red jersey
+pink jellyfish swimming underwater in a blue sea
+beautiful clown fish swimming
+animation of disposable objects shaped as a whale
+paper cut out of a pair of hands a whale and a heart
+vertical video of camel roaming in the field during daytime
+a still video of mosquito biting human
+a curious sloth hanging from a tree branch
+a plastic flamingo bird stumbles from the wind
+a wolf in its natural habitat
+a monkey sitting in the stone and scratching his head
+bat hanging upside down
+a red panda eating leaves
+snake on ground
+a harbour seal swimming near the shore
+shark swimming in the sea
+otter on branch while eating
+goat standing over a rock
+a troop of monkey on top of a mountain
+a zebra eating grass on the field
+a colorful butterfly perching on a bud
+a snail crawling on a leaf
+zookeeper showering a baby elephant
+a beetle emerging from the sand
+a nine banded armadillo searching for food
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/architecture.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/architecture.txt
new file mode 100644
index 0000000000000000000000000000000000000000..599b76baafda9b85e6a13cb56978522fdecfe2f2
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/architecture.txt
@@ -0,0 +1,100 @@
+an apartment building with balcony
+asian garden and medieval castle
+illuminated tower in berlin
+a wooden house overseeing the lake
+a crowd of people in a plaza in front of a government building
+a church interior
+jewish friends posing with hanukkah menorah in a cabin house
+a destroyed building after a missile attack in ukraine
+abandoned building in the woods
+drone video of an abandoned school building in pripyat ukraine
+elegant university building
+architecture and designs of buildings in central london
+a pancake tower with chocolate syrup and strawberries on top
+an ancient white building
+friends hanging out at a coffee house
+house front door with christmas decorations
+city night dark building
+a bird house hanging on a tree branch
+sacred sculpture in a temple
+high angle shot of a clock tower
+modern wooden house interior
+the interior of an abandoned building
+opera house overlooking sea
+a concrete structure near the green trees
+dome like building in scotland
+low angle shot of a building
+tower on hill
+a miniature house
+eiffel tower from the seine river
+low angle footage of an apartment building
+island with pier and antique building
+asian historic architecture
+drone footage of a beautiful mansion
+mosque in the middle east
+building a tent and hammock in the forest camping site
+top view of a high rise building
+house covered in snow
+skyscraper at night
+house in village
+a casino with people outside the building
+silhouette of a building
+a woman climbing a tree house
+drone view of house near lake during golden hour
+an under construction concrete house
+a watch tower by the sea
+exterior view of arabic style building
+video of a hotel building
+red paper lantern decorations hanging outside a building
+house on seashore
+aerial footage of the palace of culture and science building in warsaw poland
+aerial video of stuttgart tv tower in germany
+aerial view of the highway and building in a city
+drone shot of a skyscraper san francisco california usa
+waterfall and house
+view of the sky through a building
+drone footage of a house on top of the mountain
+abandoned house in the nature
+clouds hovering over a mansion
+light house on the ocean
+buddhist temple at sunrise
+people walking by a graveyard near a mosque at sunset
+view of lifeguard tower on the beach
+scenic view of a house in the mountains
+the landscape in front of a government building
+aerial footage of a building and its surrounding landscape in winter
+time lapse of a cloudy sky behind a transmission tower
+blue ocean near the brown castle
+fog over temple
+house in countryside top view
+building under construction
+turkish flag waving on old tower
+the georgian building
+close up shot of a steel structure
+the atrium and interior design of a multi floor building
+city view reflected on a glass building
+aerial view of a luxurious house with pool
+an unpaved road leading to the house
+drone footage of a lookout tower in mountain landscape
+wind turbines on hill behind building
+time lapse footage of the sun light in front of a small house porch
+a building built with lots of stairways
+overcast over house on seashore
+the view of the sydney opera house from the other side of the harbor
+candle on a jar and a house figurine on a surface
+video of a farm and house
+a dilapidated building made of bricks
+a view of a unique building from a moving vehicle
+aerial footage of a tall building in cambodia
+push in shot of a huge house
+a beach house built over a seawall protected from the sea waves
+exotic house surrounded by trees
+drone video of a house surrounded by tropical vegetation
+drone footage of a building beside a pond
+observation tower on hill in forest
+a tree house in the woods
+a video of vessel structure during daytime
+fire in front of illuminated building at night
+a footage of a wooden house on a wheat field
+tilt shot of a solar panel below a light tower
+water tower on the desert
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/food.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/food.txt
new file mode 100644
index 0000000000000000000000000000000000000000..032aed96d61b209fa9e58befe3bf5e4afe9dd20a
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/food.txt
@@ -0,0 +1,100 @@
+freshly baked finger looking cookies
+video of fake blood in wine glass
+halloween food art
+a person slicing a vegetable
+a serving of pumpkin dish in a plate
+close up view of green leafy vegetable
+a birthday cake in the plate
+video of a slice papaya fruit
+a muffin with a burning candle and a love sign by a ceramic mug
+a jack o lantern designed cookie
+baked bread with chocolate
+a broccoli soup on wooden table
+a freshly brewed coffee on a pink mug
+grabbing sourdough neapolitan style pizza slices
+person cooking mushrooms in frying pan
+rice grains placed on a reusable cloth bag
+slices of kiwi fruit
+grilling a steak on a pan grill
+close up of bread popping out of a toaster
+man eating noodle
+preparing a cocktail drink
+close up pasta with bacon on plate
+milk and cinnamon rolls
+boy getting a dumpling using chopsticks
+a mother preparing food with her kids
+man using his phone while eating
+fresh salmon salad on a plate
+cutting cucumbers into long thin slices as ingredient for sushi roll
+a steaming cup of tea by the window
+a glass filled with beer
+a kid eating popcorn while watching tv
+close up shot of fried fish on the plate
+a man eating a donut
+person making a vegetarian dish
+spreading cheese on bagel
+close up view of a man drinking red wine
+a couple having breakfast in a restaurant
+a student eating her sandwich
+girl peeling a banana
+red rice in a small bowl
+pancake with blueberry on the top
+green apple fruit on white wooden table
+a man eating a taco by the bar
+making of a burrito
+squeezing lemon into salad
+a chef cutting sushi rolls
+video of a delicious dessert
+deep frying a crab on a wok in high fire
+close up video of a orange juice
+video of a cooked chicken breast
+woman holding a pineapple
+a woman eating a bar of chocolate
+decorating christmas cookie
+squeezing a slice of fruit
+tuna sashimi on a plate
+a strawberry fruit mixed in an alcoholic drink
+preparing hot dogs in a grill
+a woman cutting a tomato
+an orange fruit cut in half
+a coconut fruit with drinking straw
+woman holding a dragon fruit
+a woman pouring hot beverage on a cup
+waffles with whipped cream and fruit
+focus shot of an insect at the bottom of a fruit
+preparing a healthy broccoli dish
+man eating snack at picnic
+close up video of a grilled shrimp skewer
+a woman mixing a smoothie drinks
+close up video of woman having a bite of jelly
+businessman drinking whiskey at the bar counter of a hotel lounge
+cutting an onion with a knife over a wooden chopping board
+fresh lemonade in bottles
+grilling a meat on a charcoal grill
+people enjoying asian cuisine
+close up footage of a hot dish on a clay pot
+pork ribs dish
+waffle with strawberry and syrup for breakfast
+tofu dish with rose garnish
+uncooked pork meat
+egg yolk being dumped over gourmet dish
+tasty brunch dish close up
+little boy pretending to eat the watermelon
+slicing roasted beef
+close up of a chef adding teriyaki sauce to a dish
+flat lay mexican dish
+a person placing an octopus dish on a marble surface
+close up of tea leaves brewing in a glass kettle
+adding fresh herbs to soup dish
+a scoop of roasted coffee beans
+fresh dim sum set up on a bamboo steam tray for cooking
+a girl putting ketchup on food at the kitchen
+cooking on electric stove
+a woman with a slice of a pie
+grapes and wine on a wooden board
+man taking picture of his food
+hamburger and fries on restaurant table
+close up video of japanese food
+a cracker sandwich with cheese filling for snack
+barista preparing matcha tea
+close up of onion rings being deep fried
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/human.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/human.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88de93a4815801b42c80d24d282c296aae827dd8
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/human.txt
@@ -0,0 +1,100 @@
+people carving a pumpkin
+people sitting on a sofa
+a man with a muertos face painting
+man walking in the dark
+men in front of their computer editing photos
+men loading christmas tree on tow truck
+woman washing the dishes
+woman adding honey to the cinnamon rolls
+two women kissing and smiling
+three women looking at watercolor paintings
+a family wearing paper bag masks
+a family posing for the camera
+a boy covering a rose flower with a dome glass
+boy sitting on grass petting a dog
+a girl in her tennis sportswear
+a girl coloring the cardboard
+silhouette of the couple during sunset
+couple dancing with body paint
+a child playing with water
+a woman with her child sitting on a couch in the living room
+a group of friend place doing hand gestures of agreement
+friends having a group selfie
+friends talking while on the basketball court
+group of people protesting
+a group of campers with a cute dog
+a group of photographers taking pictures at the north western gardens in llandudno north wales
+a group of students laughing and talking
+a group of martial artist warming up
+a person playing golf
+a person walking on a wet wooden bridge
+person doing a leg exercise
+ice hockey athlete on rink
+a young athlete training in swimming
+chess player dusting a chessboard
+baseball player holding his bat
+a bearded man putting a vinyl record on a vinyl player
+an orchestra finishes a performance
+people applauding the performance of the kids
+band performance at the recording studio
+father and his children playing jenga game
+people playing a board game
+man playing a video game
+a man video recording the movie in theater
+man and a woman eating while watching a movie
+movie crew talking together
+a director explaining the movie scene
+man and woman listening to music on car
+man playing music
+couple dancing slow dance with sun glare
+a ballerina practicing in the dance studio
+father and son holding hands
+father and daughter talking together
+a mother and her kids engaged in a video call
+mother and daughter reading a book together
+a mother teaching her daughter playing a violin
+kid in a halloween costume
+a happy kid playing the ukulele
+a chef slicing a cucumber
+chef wearing his gloves properly
+brother and sister using hammock
+girl applying sunblock to her brother
+a girl pushing the chair while her sister is on the chair
+colleagues talking in office building
+fighter practice kicking
+a woman fighter in her cosplay costume
+an engineer holding blueprints while talking with her colleague
+a young woman looking at vr controllers with her friend
+workmates teasing a colleague in the work
+a male police officer talking on the radio
+teacher holding a marker while talking
+teacher writing on her notebook
+a young student attending her online classes
+a student showing his classmates his wand
+a male vendor selling fruits
+a shirtless male climber
+a sound engineer listening to music
+female talking to a psychiatrist in a therapy session
+young female activist posing with flag
+a man in a hoodie and woman with a red bandana talking to each other and smiling
+a medium close up of women wearing kimonos
+a male interviewer listening to a person talking
+a social worker having a conversation with the foster parents
+a farm worker harvesting onions
+worker packing street food
+worker and client at barber shop
+elderly man lifting kettlebell
+mom assisting son in riding a bicycle
+dad watching her daughter eat
+young guy with vr headset
+pregnant woman exercising with trainer
+a fortune teller talking to a client
+wizard doing a ritual on a woman
+a footage of an actor on a movie scene
+a man holding a best actor trophy
+a singer of a music band
+a young singer performing on stage
+young dancer practicing at home
+seller showing room to a couple
+cab driver talking to passenger
+a policeman talking to the car driver
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/lifestyle.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/lifestyle.txt
new file mode 100644
index 0000000000000000000000000000000000000000..78c8be0b7e7ed7dcf10356aad0f254ae8f73301b
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/lifestyle.txt
@@ -0,0 +1,100 @@
+kids celebrating halloween at home
+little boy helping mother in kitchen
+video of a indoor green plant
+a girl arranges a christmas garland hanging by the kitchen cabinet
+candle burning in dark room
+couple having fun and goofing around the bedroom
+girls jumping up and down in the bedroom
+woman and man in pajamas working from home
+a muslim family sitting and talking in the living room
+family enjoying snack time while sitting in the living room
+woman holding an animal puppet and a little girl playing together at the living room
+kids playing in the indoor tent
+young people celebrating new year at the office
+a woman writing on the sticky note in the office
+a woman exercising at home over a yoga mat
+girls preparing easter decorations at home
+dog on floor in room
+turning on a fluorescent light inside a room
+colleagues talking to each other near the office windows
+a woman recording herself while exercising at home
+music room
+different kind of tools kept in a utility room
+sofa beds and other furniture
+a girl finding her brother reading a book in the bedroom
+an elegant ceramic plant pot and hanging plant on indoor
+furniture inside a bedroom
+interior design of the bar section
+living room with party decoration
+firewood burning in dark room
+a young woman playing the ukulele at home
+woman painting at home
+a woman in a locker room
+video of a bathroom interior
+the interior design of a jewish synagogue
+a woman in protective suit disinfecting the kitchen
+modern minimalist home interior
+modern interior design of a coffee shop
+person arranging minimalist furniture
+aerial shot of interior of the warehouse
+a room of a manufacturing facility
+interior of catholic
+interior design of a restaurant
+a female model in a changing room looking herself in mirror
+men walking in the office hallway
+people sitting in a conference room
+the interior design of a shopping mall
+chandeliers in room
+lucerne railway station interior
+a female fencer posing in a foggy room
+a toolbox and a paint roller beside a huge package in a room
+bedroom in hotel
+a woman lying in the operating room
+a chef holding and checking kitchen utensils
+a couple singing in the shower room together
+a woman cleaning mess in the living room
+an empty meeting room with natural light
+person dancing in a dark room
+close up on blood in hospital room
+a couple resting on their home floor
+a young female staff at courier office
+a man entering the gym locker room
+a bored man sitting by the tv at home
+woman dancing in indoor garden
+rubble in the interior of an abandoned house
+indoor farm in a greenhouse
+man doing handstand in indoor garden
+an abandoned indoor swimming pool
+home decorations on top of a cabinet
+graffiti art on the interior walls of an abandoned mansion
+indoor wall climbing activity
+sunlight inside a room
+teenage girl roller skating at indoor rink
+home deco with lighted
+baby in the shower room
+men enjoying office christmas party
+a bedroom with a brick wall
+actors prepping in the dressing room
+kids playing at an indoor playground
+a person sanitizing an office space using smoke machine
+mother and daughter choosing clothes at home
+a woman sitting by the indoor fire pit
+man standing on the corner of the room while looking around
+person assembling furniture
+a family stacking cardboard boxes in a room
+family having fun in the dining room
+person disinfecting a room
+a woman washing strawberries in the kitchen sink
+modern office waiting room
+close up view of a person slicing with a kitchen knife
+boiling coffee on a stove in the kitchen
+modern equipment used in a home studio
+interior of a recording studio
+people working in a call center office
+band performing at a home concert
+a group of people watching a concert in a room
+people packing their furniture
+young employees in office holding a certificate
+a criminal inside a dark room handcuffed in a table
+couple browsing and looking for furniture in the store
+workspace at home
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/plant.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/plant.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d04fc74e26f830191a74cf3b36b9b275b10f55f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/plant.txt
@@ -0,0 +1,100 @@
+video of a indoor green plant
+close up view of a plant
+close up shot of a burning plant
+plucking leaves from plant
+a plant on gold pot with glass lid
+a branch of a tree and a plant
+a leafless tree
+close up shot of fern leaf
+close up video of strawberry plant
+plant with blooming flowers
+close up video of flower petals
+watering yellow plant
+beautiful flower decoration
+cannabis flower in a jar
+a footage of the tree leaves
+a red leaf plant
+close up view of a white christmas tree
+snow pouring on a tree
+close up shot of white flowers on the tree
+leaves in the trees daytime
+a dead tree lying on a grass field
+tree branches in a flowing river
+purple flowers with leaves
+a coconut tree by the house
+close up on flower in winter
+bamboo leaves backlit by the sun
+close up video of a wet flower
+a man putting a flower in a box
+dropping flower petals on a wooden bowl
+a close up shot of gypsophila flower
+variety of succulent plants on a garden
+variety of trees and plants in a botanical garden
+forest of deciduous trees
+a stack of dried leaves burning in a forest
+tall forest trees on a misty morning
+close up view of dewdrops on a leaf
+close up view of white petaled flower
+removing a pineapple leaf
+a dragonfly perched on a leaf
+butterfly pollinating flower
+person visiting and checking a corn plant
+woman picking beans from a plant
+woman plucking mint leaves
+single tree in the middle of farmland
+a plant on a soil
+drone footage of a tree on farm field
+a tractor harvesting lavender flower
+people putting christmas ornaments on a christmas tree
+jack o lantern hanging on a tree
+tree with halloween decoration
+flower field near the waterfall
+truck carrying the tree logs
+raindrops falling on leaves
+shot of a palm tree swaying with the wind
+squirrels on a tree branch
+person holding a flower
+a fallen tree trunk
+tree with golden leaves
+cherry tree
+wind blows through leaves of the tree in autumn
+a leaf on a glass
+the long trunks of tall trees in the forest
+trees in the forest during sunny day
+close up video of tree bark
+reflection of tree branches
+trunks of many trees in the forest
+tree leaves providing shades from the sun
+leaves swaying in the wind
+low angle shot of baobab tree
+bare trees in forest
+a plant surrounded by fallen leaves
+a couple preparing food and pruning a plant
+a man cutting a tree bark
+oranges on a tree branch
+plant connected on the stones
+video of a sawmill machine cutting tree log
+women drying flower petals
+macro view of an agave plant
+a video of a person tying a plant on a string
+green moss in forest nature
+coconut tree near sea under blue sky
+the canopy of a coconut tree
+a man leaning on a tree at the beach
+a full grown plant on a pot
+candle wax dripping on flower petals
+close up of leaves in autumn
+a woman opening a book with a flower inside
+a man holding leaves looking at the camera
+a shadow of a swaying plant
+a tree and concrete structure under a blue and cloudy sky
+trimming excess leaves on a potted plant
+the changing color of the tree leaves during autumn season
+a gooseberry tree swayed by the wind
+forest trees and a medieval castle at sunset
+woman cut down tree
+an old oak tree in a park across the street from a hotel
+wild flowers growing in a forest ground
+a mossy fountain and green plants in a botanical garden
+mansion with beautiful garden
+ants on a dragon fruit flower
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/scenery.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/scenery.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5d77e3fcc8528f0fdbdd9c342209cb2a824e4490
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/scenery.txt
@@ -0,0 +1,100 @@
+scenery of desert landscape
+landscape agriculture farm tractor
+burning slash piles in the forest
+graveyard at sunset
+view of a jack o lantern with pumpkins in a smoky garden
+sun view through a spider web
+view of the sea from an abandoned building
+close up view of a full moon
+close up view of lighted candles
+close up view of swaying white flowers and leaves
+scenery of a relaxing beach
+selective focus video of grass during sunny day
+aerial view of brown dry landscape
+fireworks display in the sky at night
+a bonfire near river
+mountain view
+waterfalls in between mountain
+a picturesque view of nature
+exotic view of a riverfront city
+tall trees in the forest under the clear sky
+snow on branches in forest
+stream in the nature
+an airplane flying above the sea of clouds
+scenic video of sunset
+view of houses with bush fence under a blue and cloudy sky
+scenic view from wooden pathway
+scenic view of a tropical beach
+drone footage of waves crashing on beach shore
+a scenic view of the golden hour at norway
+time lapse video of foggy mountain forest
+brown mountain during fall season
+video of ocean during daytime
+boat sailing in the ocean
+top view of yachts
+beautiful scenery of flowing waterfalls and river
+wild ducks paddling on the lake surface
+a relaxing scenery of beach view under cloudy sky
+natural rock formations on beach under cloudy sky
+a palm tree against blue sky
+video of sailboat on a lake during sunset
+aerial view of snow piles
+time lapse of a sunset sky in the countryside
+aerial footage of a statue
+time lapse video of a farm during sunset
+clouds formation in the sky at sunset
+aerial shot of a village
+drone shot of a beautiful sunrise at the mountains
+time lapse video of foggy morning during sunrise
+sun shining between tree leaves at sunrise
+video of lake during dawn
+vehicles traveling on roadway under cloudy sky
+view of golden domed church
+a monument under the blue sky
+firecrackers in the sky
+view of fruit signage in the farm
+a dark clouds over shadowing the full moon
+view of the amazon river
+a big river swamp in a dense forest
+a blooming cherry blossom tree under a blue sky with white clouds
+a river waterfall cascading down the plunge basin
+flooded landscape with palm trees
+a blurry waterfall background
+waterfall in the mountains
+aerial footage of a city at night
+pond by small waterfall in forest
+aerial view of farmlands at the bay of lake
+rice terraces in the countryside
+a highway built across an agricultural area in the countryside
+gloomy morning in the countryside
+drone shot of an abandoned coliseum on a snowy mountain top
+boat sailing in the middle of ocean
+drone shot of the grass field
+natural landscape of mountain and sea with islets developed into a community
+aerial view of zaporizhia in ukraine
+aerial footage of a herd
+an aerial footage of a red sky
+grass and plants growing in the remains of an abandoned house
+view from hill on city
+aerial view on orthodox church
+aerial view of bay in croatia
+a footage of a frozen river
+overlooking view of a city at daylight
+view outside the cemetery
+clear sky with moon over meadow
+clouds over railway
+aerial footage of moving vehicles on the road at night
+aerial view of town and park
+top view of skyscrapers
+top view of the empire state building in manhattan
+top view of the central park in new york city
+sheep running in a grass field
+clear sky over factory
+smoke and fire in birds eye view
+view of a pathway with snow melting on its side
+ferry under bridge on river near city in malaysia
+mountain slopes covered in green vegetation
+panoramic view of a town surrounded by snow covered mountains
+aerial view of a palace
+top view of vehicles driving on the intersection
+a graveyard by a church in a mountain landscape
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/vehicles.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/vehicles.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5fd5fcad2c20d277aa58b6366fa195e48d21e6dc
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_category/vehicles.txt
@@ -0,0 +1,100 @@
+a modern railway station in malaysia use for public transportation
+drone footage of amsterdam metro station
+train arriving at a station
+red vehicle driving on field
+close up view of flashing emergency vehicle lighting
+vehicle with fertilizer on field
+a highway built across an agricultural area in the countryside
+drone footage of motorcycles driving on country road between agricultural fields
+a road in the woods under fog
+footage of a car driving through a wheat field
+vehicle stops for an ambulance passing through city traffic
+emergency vehicle parked outside the casino
+zombies attacking a woman and a boy inside a car
+woman seating inside the car while chewing
+video of passengers riding a double decker bus during night
+traffic in london street at night
+elderly couple checking engine of automobile
+a green vintage automobile with an open hood parked in a parking area
+close up of a prototype automobile with exposed engine on the back seat of the car
+aerial view of road in forest
+train departing from station
+aerial view of a train passing by a bridge
+video of a train tracks
+video footage of a subway
+video of blinking traffic lights
+couple walking out on the subway
+time lapse of a subway tunnel
+monitor board inside the subway
+metro train at night
+zoom in video of a tram passing by city
+young man using laptop in the tram
+man reading a book at bus stop
+close up shot of a moving taxi
+night travel in london street on a public bus
+red bus in a rainy city
+flow of traffic in the city
+close up shot of a yellow taxi turning left
+two women calling for a taxi
+drone view of an illuminated bridge across a river
+policeman in police car talking on radio
+airplane taking off at night
+view through window in airplane
+an airplane in the sky
+helicopter landing on the street
+a pilot getting out of a helicopter
+a helicopter flying under blue sky
+boat sailing in the middle of the ocean
+girl playing with a toy boat
+silhouette of a boat on sea during golden hour
+a boat travelling around the lake
+road on mountain ridge
+ship sailing on danube river
+slow motion video of a ship water trail in the sea
+drone footage of a wreck ship on shore
+a white yacht traveling on a river and passing under the bridge
+female teenagers drinking champagne in the yacht
+video of yacht sailing in the ocean
+red combine harvester on road on field
+a woman sitting on a bicycle while using a mobile phone
+a woman sitting on a motorcycle looking around
+three teenagers fixing a bicycle
+a woman in a halloween costume posing on a motorcycle
+a parked motorcycle on a foggy roadside
+cable car near sea shore
+a truck travelling in the road
+footage of the road without any traffic
+a road sign
+love padlocks on a bridge
+camera moving at highway construction site
+vehicles driving on highway
+a motorbike on highway at timelapse mode
+point of view of a car driving through a tunnel
+time lapse of heavy traffic on an avenue
+ferry boat on city canal
+black vintage car in museum
+a zigzag road across a forest
+people crossing the road
+video of a kayak boat in a river
+a person paddling a wooden boat in a lake
+a car charging in the parking area
+cars parked on the road
+footage of the street with people and vehicle passing by in the rain
+traffic on busy city street
+a woman getting out of the car to walk with their dog
+yacht sailing through the ocean
+people in queue to military ship
+man wearing motorcycle helmet looking at the camera
+empty seats in the bus
+empty boat on the water
+cargo train traveling on the mountainside
+cruise ship in harbor
+counting down at traffic lights
+pressing the car ignition
+fire truck driving on the road
+a footage of a broken bicycle
+drone footage of an ambulance on the road
+slow motion footage of a racing car
+ship sailing on sea against sunset
+big cargo ship passing on the shore
+back view of man and woman walking on unpaved road
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/appearance_style.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/appearance_style.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4382de9e79fd19e9b1ca18497efb2e1cc9e4ca91
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/appearance_style.txt
@@ -0,0 +1,90 @@
+A beautiful coastal beach in spring, waves lapping on sand, Van Gogh style
+A beautiful coastal beach in spring, waves lapping on sand, oil painting
+A beautiful coastal beach in spring, waves lapping on sand by Hokusai, in the style of Ukiyo
+A beautiful coastal beach in spring, waves lapping on sand, black and white
+A beautiful coastal beach in spring, waves lapping on sand, pixel art
+A beautiful coastal beach in spring, waves lapping on sand, in cyberpunk style
+A beautiful coastal beach in spring, waves lapping on sand, animated style
+A beautiful coastal beach in spring, waves lapping on sand, watercolor painting
+A beautiful coastal beach in spring, waves lapping on sand, surrealism style
+The bund Shanghai, Van Gogh style
+The bund Shanghai, oil painting
+The bund Shanghai by Hokusai, in the style of Ukiyo
+The bund Shanghai, black and white
+The bund Shanghai, pixel art
+The bund Shanghai, in cyberpunk style
+The bund Shanghai, animated style
+The bund Shanghai, watercolor painting
+The bund Shanghai, surrealism style
+a shark is swimming in the ocean, Van Gogh style
+a shark is swimming in the ocean, oil painting
+a shark is swimming in the ocean by Hokusai, in the style of Ukiyo
+a shark is swimming in the ocean, black and white
+a shark is swimming in the ocean, pixel art
+a shark is swimming in the ocean, in cyberpunk style
+a shark is swimming in the ocean, animated style
+a shark is swimming in the ocean, watercolor painting
+a shark is swimming in the ocean, surrealism style
+A panda drinking coffee in a cafe in Paris, Van Gogh style
+A panda drinking coffee in a cafe in Paris, oil painting
+A panda drinking coffee in a cafe in Paris by Hokusai, in the style of Ukiyo
+A panda drinking coffee in a cafe in Paris, black and white
+A panda drinking coffee in a cafe in Paris, pixel art
+A panda drinking coffee in a cafe in Paris, in cyberpunk style
+A panda drinking coffee in a cafe in Paris, animated style
+A panda drinking coffee in a cafe in Paris, watercolor painting
+A panda drinking coffee in a cafe in Paris, surrealism style
+A cute happy Corgi playing in park, sunset, Van Gogh style
+A cute happy Corgi playing in park, sunset, oil painting
+A cute happy Corgi playing in park, sunset by Hokusai, in the style of Ukiyo
+A cute happy Corgi playing in park, sunset, black and white
+A cute happy Corgi playing in park, sunset, pixel art
+A cute happy Corgi playing in park, sunset, in cyberpunk style
+A cute happy Corgi playing in park, sunset, animated style
+A cute happy Corgi playing in park, sunset, watercolor painting
+A cute happy Corgi playing in park, sunset, surrealism style
+Gwen Stacy reading a book, Van Gogh style
+Gwen Stacy reading a book, oil painting
+Gwen Stacy reading a book by Hokusai, in the style of Ukiyo
+Gwen Stacy reading a book, black and white
+Gwen Stacy reading a book, pixel art
+Gwen Stacy reading a book, in cyberpunk style
+Gwen Stacy reading a book, animated style
+Gwen Stacy reading a book, watercolor painting
+Gwen Stacy reading a book, surrealism style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, Van Gogh style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, oil painting
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background by Hokusai, in the style of Ukiyo
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, black and white
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pixel art
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, in cyberpunk style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, animated style
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, watercolor painting
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, surrealism style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, Van Gogh style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, oil painting
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas by Hokusai, in the style of Ukiyo
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, black and white
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pixel art
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, in cyberpunk style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, animated style
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, watercolor painting
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, surrealism style
+An astronaut flying in space, Van Gogh style
+An astronaut flying in space, oil painting
+An astronaut flying in space by Hokusai, in the style of Ukiyo
+An astronaut flying in space, black and white
+An astronaut flying in space, pixel art
+An astronaut flying in space, in cyberpunk style
+An astronaut flying in space, animated style
+An astronaut flying in space, watercolor painting
+An astronaut flying in space, surrealism style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, Van Gogh style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, oil painting
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks by Hokusai, in the style of Ukiyo
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, black and white
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pixel art
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, in cyberpunk style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, animated style
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, watercolor painting
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, surrealism style
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/color.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/color.txt
new file mode 100644
index 0000000000000000000000000000000000000000..42e135adfd64950df6c3111d4587b08b02727cbe
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/color.txt
@@ -0,0 +1,85 @@
+a red bicycle
+a green bicycle
+a blue bicycle
+a yellow bicycle
+an orange bicycle
+a purple bicycle
+a pink bicycle
+a black bicycle
+a white bicycle
+a red car
+a green car
+a blue car
+a yellow car
+an orange car
+a purple car
+a pink car
+a black car
+a white car
+a red bird
+a green bird
+a blue bird
+a yellow bird
+an orange bird
+a purple bird
+a pink bird
+a black bird
+a white bird
+a black cat
+a white cat
+an orange cat
+a yellow cat
+a red umbrella
+a green umbrella
+a blue umbrella
+a yellow umbrella
+an orange umbrella
+a purple umbrella
+a pink umbrella
+a black umbrella
+a white umbrella
+a red suitcase
+a green suitcase
+a blue suitcase
+a yellow suitcase
+an orange suitcase
+a purple suitcase
+a pink suitcase
+a black suitcase
+a white suitcase
+a red bowl
+a green bowl
+a blue bowl
+a yellow bowl
+an orange bowl
+a purple bowl
+a pink bowl
+a black bowl
+a white bowl
+a red chair
+a green chair
+a blue chair
+a yellow chair
+an orange chair
+a purple chair
+a pink chair
+a black chair
+a white chair
+a red clock
+a green clock
+a blue clock
+a yellow clock
+an orange clock
+a purple clock
+a pink clock
+a black clock
+a white clock
+a red vase
+a green vase
+a blue vase
+a yellow vase
+an orange vase
+a purple vase
+a pink vase
+a black vase
+a white vase
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/human_action.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/human_action.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2abd9a25a447e296d999df0d24920acd441f6a4d
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/human_action.txt
@@ -0,0 +1,100 @@
+A person is riding a bike
+A person is marching
+A person is roller skating
+A person is tasting beer
+A person is clapping
+A person is drawing
+A person is petting animal (not cat)
+A person is eating watermelon
+A person is playing harp
+A person is wrestling
+A person is riding scooter
+A person is sweeping floor
+A person is skateboarding
+A person is dunking basketball
+A person is playing flute
+A person is stretching leg
+A person is tying tie
+A person is skydiving
+A person is shooting goal (soccer)
+A person is playing piano
+A person is finger snapping
+A person is canoeing or kayaking
+A person is laughing
+A person is digging
+A person is clay pottery making
+A person is shooting basketball
+A person is bending back
+A person is shaking hands
+A person is bandaging
+A person is push up
+A person is catching or throwing frisbee
+A person is playing trumpet
+A person is flying kite
+A person is filling eyebrows
+A person is shuffling cards
+A person is folding clothes
+A person is smoking
+A person is tai chi
+A person is squat
+A person is playing controller
+A person is throwing axe
+A person is giving or receiving award
+A person is air drumming
+A person is taking a shower
+A person is planting trees
+A person is sharpening knives
+A person is robot dancing
+A person is rock climbing
+A person is hula hooping
+A person is writing
+A person is bungee jumping
+A person is pushing cart
+A person is cleaning windows
+A person is cutting watermelon
+A person is cheerleading
+A person is washing hands
+A person is ironing
+A person is cutting nails
+A person is hugging
+A person is trimming or shaving beard
+A person is jogging
+A person is making bed
+A person is washing dishes
+A person is grooming dog
+A person is doing laundry
+A person is knitting
+A person is reading book
+A person is baby waking up
+A person is massaging legs
+A person is brushing teeth
+A person is crawling baby
+A person is motorcycling
+A person is driving car
+A person is sticking tongue out
+A person is shaking head
+A person is sword fighting
+A person is doing aerobics
+A person is strumming guitar
+A person is riding or walking with horse
+A person is archery
+A person is catching or throwing baseball
+A person is playing chess
+A person is rock scissors paper
+A person is using computer
+A person is arranging flowers
+A person is bending metal
+A person is ice skating
+A person is climbing a rope
+A person is crying
+A person is dancing ballet
+A person is getting a haircut
+A person is running on treadmill
+A person is kissing
+A person is counting money
+A person is barbequing
+A person is peeling apples
+A person is milking cow
+A person is shining shoes
+A person is making snowman
+A person is sailing
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/multiple_objects.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/multiple_objects.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a1211eeff1021efc49cab7e4c8e91867862949e
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/multiple_objects.txt
@@ -0,0 +1,82 @@
+a bird and a cat
+a cat and a dog
+a dog and a horse
+a horse and a sheep
+a sheep and a cow
+a cow and an elephant
+an elephant and a bear
+a bear and a zebra
+a zebra and a giraffe
+a giraffe and a bird
+a chair and a couch
+a couch and a potted plant
+a potted plant and a tv
+a tv and a laptop
+a laptop and a remote
+a remote and a keyboard
+a keyboard and a cell phone
+a cell phone and a book
+a book and a clock
+a clock and a backpack
+a backpack and an umbrella
+an umbrella and a handbag
+a handbag and a tie
+a tie and a suitcase
+a suitcase and a vase
+a vase and scissors
+scissors and a teddy bear
+a teddy bear and a frisbee
+a frisbee and skis
+skis and a snowboard
+a snowboard and a sports ball
+a sports ball and a kite
+a kite and a baseball bat
+a baseball bat and a baseball glove
+a baseball glove and a skateboard
+a skateboard and a surfboard
+a surfboard and a tennis racket
+a tennis racket and a bottle
+a bottle and a chair
+an airplane and a train
+a train and a boat
+a boat and an airplane
+a bicycle and a car
+a car and a motorcycle
+a motorcycle and a bus
+a bus and a traffic light
+a traffic light and a fire hydrant
+a fire hydrant and a stop sign
+a stop sign and a parking meter
+a parking meter and a truck
+a truck and a bicycle
+a toilet and a hair drier
+a hair drier and a toothbrush
+a toothbrush and a sink
+a sink and a toilet
+a wine glass and a chair
+a cup and a couch
+a fork and a potted plant
+a knife and a tv
+a spoon and a laptop
+a bowl and a remote
+a banana and a keyboard
+an apple and a cell phone
+a sandwich and a book
+an orange and a clock
+broccoli and a backpack
+a carrot and an umbrella
+a hot dog and a handbag
+a pizza and a tie
+a donut and a suitcase
+a cake and a vase
+an oven and scissors
+a toaster and a teddy bear
+a microwave and a frisbee
+a refrigerator and skis
+a bicycle and an airplane
+a car and a train
+a motorcycle and a boat
+a person and a toilet
+a person and a hair drier
+a person and a toothbrush
+a person and a sink
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/object_class.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/object_class.txt
new file mode 100644
index 0000000000000000000000000000000000000000..11aa72dd0e757e7272c1c734f804d482abe98c5f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/object_class.txt
@@ -0,0 +1,79 @@
+a person
+a bicycle
+a car
+a motorcycle
+an airplane
+a bus
+a train
+a truck
+a boat
+a traffic light
+a fire hydrant
+a stop sign
+a parking meter
+a bench
+a bird
+a cat
+a dog
+a horse
+a sheep
+a cow
+an elephant
+a bear
+a zebra
+a giraffe
+a backpack
+an umbrella
+a handbag
+a tie
+a suitcase
+a frisbee
+skis
+a snowboard
+a sports ball
+a kite
+a baseball bat
+a baseball glove
+a skateboard
+a surfboard
+a tennis racket
+a bottle
+a wine glass
+a cup
+a fork
+a knife
+a spoon
+a bowl
+a banana
+an apple
+a sandwich
+an orange
+broccoli
+a carrot
+a hot dog
+a pizza
+a donut
+a cake
+a chair
+a couch
+a potted plant
+a bed
+a dining table
+a toilet
+a tv
+a laptop
+a remote
+a keyboard
+a cell phone
+a microwave
+an oven
+a toaster
+a sink
+a refrigerator
+a book
+a clock
+a vase
+scissors
+a teddy bear
+a hair drier
+a toothbrush
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/overall_consistency.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/overall_consistency.txt
new file mode 100644
index 0000000000000000000000000000000000000000..360c2673b95fbd898222b0ea3a0fec98779f4b25
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/overall_consistency.txt
@@ -0,0 +1,93 @@
+Close up of grapes on a rotating table.
+Turtle swimming in ocean.
+A storm trooper vacuuming the beach.
+A panda standing on a surfboard in the ocean in sunset.
+An astronaut feeding ducks on a sunny afternoon, reflection from the water.
+Two pandas discussing an academic paper.
+Sunset time lapse at the beach with moving clouds and colors in the sky.
+A fat rabbit wearing a purple robe walking through a fantasy landscape.
+A koala bear playing piano in the forest.
+An astronaut flying in space.
+Fireworks.
+An animated painting of fluffy white clouds moving in sky.
+Flying through fantasy landscapes.
+A bigfoot walking in the snowstorm.
+A squirrel eating a burger.
+A cat wearing sunglasses and working as a lifeguard at a pool.
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks.
+Splash of turquoise water in extreme slow motion, alpha channel included.
+an ice cream is melting on the table.
+a drone flying over a snowy forest.
+a shark is swimming in the ocean.
+Aerial panoramic video from a drone of a fantasy land.
+a teddy bear is swimming in the ocean.
+time lapse of sunrise on mars.
+golden fish swimming in the ocean.
+An artist brush painting on a canvas close up.
+A drone view of celebration with Christmas tree and fireworks, starry sky - background.
+happy dog wearing a yellow turtleneck, studio, portrait, facing camera, dark background
+Origami dancers in white paper, 3D render, on white background, studio shot, dancing modern dance.
+Campfire at night in a snowy forest with starry sky in the background.
+a fantasy landscape
+A 3D model of a 1800s victorian house.
+this is how I do makeup in the morning.
+A raccoon that looks like a turtle, digital art.
+Robot dancing in Times Square.
+Busy freeway at night.
+Balloon full of water exploding in extreme slow motion.
+An astronaut is riding a horse in the space in a photorealistic style.
+Macro slo-mo. Slow motion cropped closeup of roasted coffee beans falling into an empty bowl.
+Sewing machine, old sewing machine working.
+Motion colour drop in water, ink swirling in water, colourful ink in water, abstraction fancy dream cloud of ink.
+Few big purple plums rotating on the turntable. water drops appear on the skin during rotation. isolated on the white background. close-up. macro.
+Vampire makeup face of beautiful girl, red contact lenses.
+Ashtray full of butts on table, smoke flowing on black background, close-up
+Pacific coast, carmel by the sea ocean and waves.
+A teddy bear is playing drum kit in NYC Times Square.
+A corgi is playing drum kit.
+An Iron man is playing the electronic guitar, high electronic guitar.
+A raccoon is playing the electronic guitar.
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background by Vincent van Gogh
+A corgi's head depicted as an explosion of a nebula
+A fantasy landscape
+A future where humans have achieved teleportation technology
+A jellyfish floating through the ocean, with bioluminescent tentacles
+A Mars rover moving on Mars
+A panda drinking coffee in a cafe in Paris
+A space shuttle launching into orbit, with flames and smoke billowing out from the engines
+A steam train moving on a mountainside
+A super cool giant robot in Cyberpunk Beijing
+A tropical beach at sunrise, with palm trees and crystal-clear water in the foreground
+Cinematic shot of Van Gogh's selfie, Van Gogh style
+Gwen Stacy reading a book
+Iron Man flying in the sky
+The bund Shanghai, oil painting
+Yoda playing guitar on the stage
+A beautiful coastal beach in spring, waves lapping on sand by Hokusai, in the style of Ukiyo
+A beautiful coastal beach in spring, waves lapping on sand by Vincent van Gogh
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background
+A car moving slowly on an empty street, rainy evening
+A cat eating food out of a bowl
+A cat wearing sunglasses at a pool
+A confused panda in calculus class
+A cute fluffy panda eating Chinese food in a restaurant
+A cute happy Corgi playing in park, sunset
+A cute raccoon playing guitar in a boat on the ocean
+A happy fuzzy panda playing guitar nearby a campfire, snow mountain in the background
+A lightning striking atop of eiffel tower, dark clouds in the sky
+A modern art museum, with colorful paintings
+A panda cooking in the kitchen
+A panda playing on a swing set
+A polar bear is playing guitar
+A raccoon dressed in suit playing the trumpet, stage background
+A robot DJ is playing the turntable, in heavy raining futuristic tokyo rooftop cyberpunk night, sci-fi, fantasy
+A shark swimming in clear Caribbean ocean
+A super robot protecting city
+A teddy bear washing the dishes
+An epic tornado attacking above a glowing city at night, the tornado is made of smoke
+An oil painting of a couple in formal evening wear going home get caught in a heavy downpour with umbrellas
+Clown fish swimming through the coral reef
+Hyper-realistic spaceship landing on Mars
+The bund Shanghai, vibrant color
+Vincent van Gogh is painting in the room
+Yellow flowers swing in the wind
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/scene.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/scene.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ec0366233eba9957eb5d2772064c4b5d0e9b05d
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/scene.txt
@@ -0,0 +1,86 @@
+alley
+amusement park
+aquarium
+arch
+art gallery
+bathroom
+bakery shop
+ballroom
+bar
+barn
+basement
+beach
+bedroom
+bridge
+botanical garden
+cafeteria
+campsite
+campus
+carrousel
+castle
+cemetery
+classroom
+cliff
+crosswalk
+construction site
+corridor
+courtyard
+desert
+downtown
+driveway
+farm
+food court
+football field
+forest road
+fountain
+gas station
+glacier
+golf course
+indoor gymnasium
+harbor
+highway
+hospital
+house
+iceberg
+industrial area
+jail cell
+junkyard
+kitchen
+indoor library
+lighthouse
+laboratory
+mansion
+marsh
+mountain
+indoor movie theater
+indoor museum
+music studio
+nursery
+ocean
+office
+palace
+parking lot
+pharmacy
+phone booth
+raceway
+restaurant
+river
+science museum
+shower
+ski slope
+sky
+skyscraper
+baseball stadium
+staircase
+street
+supermarket
+indoor swimming pool
+tower
+outdoor track
+train railway
+train station platform
+underwater coral reef
+valley
+volcano
+waterfall
+windmill
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/spatial_relationship.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/spatial_relationship.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25fe959fd8dc27674a2d52b4349ff3aacbe8d66c
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/spatial_relationship.txt
@@ -0,0 +1,84 @@
+a bicycle on the left of a car, front view
+a car on the right of a motorcycle, front view
+a motorcycle on the left of a bus, front view
+a bus on the right of a traffic light, front view
+a traffic light on the left of a fire hydrant, front view
+a fire hydrant on the right of a stop sign, front view
+a stop sign on the left of a parking meter, front view
+a parking meter on the right of a bench, front view
+a bench on the left of a truck, front view
+a truck on the right of a bicycle, front view
+a bird on the left of a cat, front view
+a cat on the right of a dog, front view
+a dog on the left of a horse, front view
+a horse on the right of a sheep, front view
+a sheep on the left of a cow, front view
+a cow on the right of an elephant, front view
+an elephant on the left of a bear, front view
+a bear on the right of a zebra, front view
+a zebra on the left of a giraffe, front view
+a giraffe on the right of a bird, front view
+a bottle on the left of a wine glass, front view
+a wine glass on the right of a cup, front view
+a cup on the left of a fork, front view
+a fork on the right of a knife, front view
+a knife on the left of a spoon, front view
+a spoon on the right of a bowl, front view
+a bowl on the left of a bottle, front view
+a potted plant on the left of a remote, front view
+a remote on the right of a clock, front view
+a clock on the left of a vase, front view
+a vase on the right of scissors, front view
+scissors on the left of a teddy bear, front view
+a teddy bear on the right of a potted plant, front view
+a frisbee on the left of a sports ball, front view
+a sports ball on the right of a baseball bat, front view
+a baseball bat on the left of a baseball glove, front view
+a baseball glove on the right of a tennis racket, front view
+a tennis racket on the left of a frisbee, front view
+a toilet on the left of a hair drier, front view
+a hair drier on the right of a toothbrush, front view
+a toothbrush on the left of a sink, front view
+a sink on the right of a toilet, front view
+a chair on the left of a couch, front view
+a couch on the right of a bed, front view
+a bed on the left of a tv, front view
+a tv on the right of a dining table, front view
+a dining table on the left of a chair, front view
+an airplane on the left of a train, front view
+a train on the right of a boat, front view
+a boat on the left of an airplane, front view
+an oven on the top of a toaster, front view
+an oven on the bottom of a toaster, front view
+a toaster on the top of a microwave, front view
+a toaster on the bottom of a microwave, front view
+a microwave on the top of an oven, front view
+a microwave on the bottom of an oven, front view
+a banana on the top of an apple, front view
+a banana on the bottom of an apple, front view
+an apple on the top of a sandwich, front view
+an apple on the bottom of a sandwich, front view
+a sandwich on the top of an orange, front view
+a sandwich on the bottom of an orange, front view
+an orange on the top of a carrot, front view
+an orange on the bottom of a carrot, front view
+a carrot on the top of a hot dog, front view
+a carrot on the bottom of a hot dog, front view
+a hot dog on the top of a pizza, front view
+a hot dog on the bottom of a pizza, front view
+a pizza on the top of a donut, front view
+a pizza on the bottom of a donut, front view
+a donut on the top of broccoli, front view
+a donut on the bottom of broccoli, front view
+broccoli on the top of a banana, front view
+broccoli on the bottom of a banana, front view
+skis on the top of a snowboard, front view
+skis on the bottom of a snowboard, front view
+a snowboard on the top of a kite, front view
+a snowboard on the bottom of a kite, front view
+a kite on the top of a skateboard, front view
+a kite on the bottom of a skateboard, front view
+a skateboard on the top of a surfboard, front view
+a skateboard on the bottom of a surfboard, front view
+a surfboard on the top of skis, front view
+a surfboard on the bottom of skis, front view
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/subject_consistency.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/subject_consistency.txt
new file mode 100644
index 0000000000000000000000000000000000000000..97cb77e5efe48b1fc3730eb9d6144df55baadcb5
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/subject_consistency.txt
@@ -0,0 +1,72 @@
+a person swimming in ocean
+a person giving a presentation to a room full of colleagues
+a person washing the dishes
+a person eating a burger
+a person walking in the snowstorm
+a person drinking coffee in a cafe
+a person playing guitar
+a bicycle leaning against a tree
+a bicycle gliding through a snowy field
+a bicycle slowing down to stop
+a bicycle accelerating to gain speed
+a car stuck in traffic during rush hour
+a car turning a corner
+a car slowing down to stop
+a car accelerating to gain speed
+a motorcycle cruising along a coastal highway
+a motorcycle turning a corner
+a motorcycle slowing down to stop
+a motorcycle gliding through a snowy field
+a motorcycle accelerating to gain speed
+an airplane soaring through a clear blue sky
+an airplane taking off
+an airplane landing smoothly on a runway
+an airplane accelerating to gain speed
+a bus turning a corner
+a bus stuck in traffic during rush hour
+a bus accelerating to gain speed
+a train speeding down the tracks
+a train crossing over a tall bridge
+a train accelerating to gain speed
+a truck turning a corner
+a truck anchored in a tranquil bay
+a truck stuck in traffic during rush hour
+a truck slowing down to stop
+a truck accelerating to gain speed
+a boat sailing smoothly on a calm lake
+a boat slowing down to stop
+a boat accelerating to gain speed
+a bird soaring gracefully in the sky
+a bird building a nest from twigs and leaves
+a bird flying over a snowy forest
+a cat grooming itself meticulously with its tongue
+a cat playing in park
+a cat drinking water
+a cat running happily
+a dog enjoying a peaceful walk
+a dog playing in park
+a dog drinking water
+a dog running happily
+a horse bending down to drink water from a river
+a horse galloping across an open field
+a horse taking a peaceful walk
+a horse running to join a herd of its kind
+a sheep bending down to drink water from a river
+a sheep taking a peaceful walk
+a sheep running to join a herd of its kind
+a cow bending down to drink water from a river
+a cow chewing cud while resting in a tranquil barn
+a cow running to join a herd of its kind
+an elephant spraying itself with water using its trunk to cool down
+an elephant taking a peaceful walk
+an elephant running to join a herd of its kind
+a bear catching a salmon in its powerful jaws
+a bear sniffing the air for scents of food
+a bear climbing a tree
+a bear hunting for prey
+a zebra bending down to drink water from a river
+a zebra running to join a herd of its kind
+a zebra taking a peaceful walk
+a giraffe bending down to drink water from a river
+a giraffe taking a peaceful walk
+a giraffe running to join a herd of its kind
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_flickering.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_flickering.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9fb5cad2d0c6b7a9cdeea5e0c084c2140fcd4152
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_flickering.txt
@@ -0,0 +1,75 @@
+In a still frame, a stop sign
+a toilet, frozen in time
+a laptop, frozen in time
+A tranquil tableau of alley
+A tranquil tableau of bar
+A tranquil tableau of barn
+A tranquil tableau of bathroom
+A tranquil tableau of bedroom
+A tranquil tableau of cliff
+In a still frame, courtyard
+In a still frame, gas station
+A tranquil tableau of house
+indoor gymnasium, frozen in time
+A tranquil tableau of indoor library
+A tranquil tableau of kitchen
+A tranquil tableau of palace
+In a still frame, parking lot
+In a still frame, phone booth
+A tranquil tableau of restaurant
+A tranquil tableau of tower
+A tranquil tableau of a bowl
+A tranquil tableau of an apple
+A tranquil tableau of a bench
+A tranquil tableau of a bed
+A tranquil tableau of a chair
+A tranquil tableau of a cup
+A tranquil tableau of a dining table
+In a still frame, a pear
+A tranquil tableau of a bunch of grapes
+A tranquil tableau of a bowl on the kitchen counter
+A tranquil tableau of a beautiful, handcrafted ceramic bowl
+A tranquil tableau of an antique bowl
+A tranquil tableau of an exquisite mahogany dining table
+A tranquil tableau of a wooden bench in the park
+A tranquil tableau of a beautiful wrought-iron bench surrounded by blooming flowers
+In a still frame, a park bench with a view of the lake
+A tranquil tableau of a vintage rocking chair was placed on the porch
+A tranquil tableau of the jail cell was small and dimly lit, with cold, steel bars
+A tranquil tableau of the phone booth was tucked away in a quiet alley
+a dilapidated phone booth stood as a relic of a bygone era on the sidewalk, frozen in time
+A tranquil tableau of the old red barn stood weathered and iconic against the backdrop of the countryside
+A tranquil tableau of a picturesque barn was painted a warm shade of red and nestled in a picturesque meadow
+In a still frame, within the desolate desert, an oasis unfolded, characterized by the stoic presence of palm trees and a motionless, glassy pool of water
+In a still frame, the Parthenon's majestic Doric columns stand in serene solitude atop the Acropolis, framed by the tranquil Athenian landscape
+In a still frame, the Temple of Hephaestus, with its timeless Doric grace, stands stoically against the backdrop of a quiet Athens
+In a still frame, the ornate Victorian streetlamp stands solemnly, adorned with intricate ironwork and stained glass panels
+A tranquil tableau of the Stonehenge presented itself as an enigmatic puzzle, each colossal stone meticulously placed against the backdrop of tranquility
+In a still frame, in the vast desert, an oasis nestled among dunes, featuring tall palm trees and an air of serenity
+static view on a desert scene with an oasis, palm trees, and a clear, calm pool of water
+A tranquil tableau of an ornate Victorian streetlamp standing on a cobblestone street corner, illuminating the empty night
+A tranquil tableau of a tranquil lakeside cabin nestled among tall pines, its reflection mirrored perfectly in the calm water
+In a still frame, a vintage gas lantern, adorned with intricate details, gracing a historic cobblestone square
+In a still frame, a tranquil Japanese tea ceremony room, with tatami mats, a delicate tea set, and a bonsai tree in the corner
+A tranquil tableau of the Parthenon stands resolute in its classical elegance, a timeless symbol of Athens' cultural legacy
+A tranquil tableau of in the heart of Plaka, the neoclassical architecture of the old city harmonizes with the ancient ruins
+A tranquil tableau of in the desolate beauty of the American Southwest, Chaco Canyon's ancient ruins whispered tales of an enigmatic civilization that once thrived amidst the arid landscapes
+A tranquil tableau of at the edge of the Arabian Desert, the ancient city of Petra beckoned with its enigmatic rock-carved façades
+In a still frame, amidst the cobblestone streets, an Art Nouveau lamppost stood tall
+A tranquil tableau of in the quaint village square, a traditional wrought-iron streetlamp featured delicate filigree patterns and amber-hued glass panels
+A tranquil tableau of the lampposts were adorned with Art Deco motifs, their geometric shapes and frosted glass creating a sense of vintage glamour
+In a still frame, in the picturesque square, a Gothic-style lamppost adorned with intricate stone carvings added a touch of medieval charm to the setting
+In a still frame, in the heart of the old city, a row of ornate lantern-style streetlamps bathed the narrow alleyway in a warm, welcoming light
+A tranquil tableau of in the heart of the Utah desert, a massive sandstone arch spanned the horizon
+A tranquil tableau of in the Arizona desert, a massive stone bridge arched across a rugged canyon
+A tranquil tableau of in the corner of the minimalist tea room, a bonsai tree added a touch of nature's beauty to the otherwise simple and elegant space
+In a still frame, amidst the hushed ambiance of the traditional tea room, a meticulously arranged tea set awaited, with porcelain cups, a bamboo whisk
+In a still frame, nestled in the Zen garden, a rustic teahouse featured tatami seating and a traditional charcoal brazier
+A tranquil tableau of a country estate's library featured elegant wooden shelves
+A tranquil tableau of beneath the shade of a solitary oak tree, an old wooden park bench sat patiently
+A tranquil tableau of beside a tranquil pond, a weeping willow tree draped its branches gracefully over the water's surface, creating a serene tableau of reflection and calm
+A tranquil tableau of in the Zen garden, a perfectly raked gravel path led to a serene rock garden
+In a still frame, a tranquil pond was fringed by weeping cherry trees, their blossoms drifting lazily onto the glassy surface
+In a still frame, within the historic library's reading room, rows of antique leather chairs and mahogany tables offered a serene haven for literary contemplation
+A tranquil tableau of a peaceful orchid garden showcased a variety of delicate blooms
+A tranquil tableau of in the serene courtyard, a centuries-old stone well stood as a symbol of a bygone era, its mossy stones bearing witness to the passage of time
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_style.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_style.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fea23cbd7f248d93978ee44fe1860f75e6978117
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/VBench/prompts_per_dimension/temporal_style.txt
@@ -0,0 +1,100 @@
+A beautiful coastal beach in spring, waves lapping on sand, in super slow motion
+A beautiful coastal beach in spring, waves lapping on sand, zoom in
+A beautiful coastal beach in spring, waves lapping on sand, zoom out
+A beautiful coastal beach in spring, waves lapping on sand, pan left
+A beautiful coastal beach in spring, waves lapping on sand, pan right
+A beautiful coastal beach in spring, waves lapping on sand, tilt up
+A beautiful coastal beach in spring, waves lapping on sand, tilt down
+A beautiful coastal beach in spring, waves lapping on sand, with an intense shaking effect
+A beautiful coastal beach in spring, waves lapping on sand, featuring a steady and smooth perspective
+A beautiful coastal beach in spring, waves lapping on sand, racking focus
+The bund Shanghai, in super slow motion
+The bund Shanghai, zoom in
+The bund Shanghai, zoom out
+The bund Shanghai, pan left
+The bund Shanghai, pan right
+The bund Shanghai, tilt up
+The bund Shanghai, tilt down
+The bund Shanghai, with an intense shaking effect
+The bund Shanghai, featuring a steady and smooth perspective
+The bund Shanghai, racking focus
+a shark is swimming in the ocean, in super slow motion
+a shark is swimming in the ocean, zoom in
+a shark is swimming in the ocean, zoom out
+a shark is swimming in the ocean, pan left
+a shark is swimming in the ocean, pan right
+a shark is swimming in the ocean, tilt up
+a shark is swimming in the ocean, tilt down
+a shark is swimming in the ocean, with an intense shaking effect
+a shark is swimming in the ocean, featuring a steady and smooth perspective
+a shark is swimming in the ocean, racking focus
+A panda drinking coffee in a cafe in Paris, in super slow motion
+A panda drinking coffee in a cafe in Paris, zoom in
+A panda drinking coffee in a cafe in Paris, zoom out
+A panda drinking coffee in a cafe in Paris, pan left
+A panda drinking coffee in a cafe in Paris, pan right
+A panda drinking coffee in a cafe in Paris, tilt up
+A panda drinking coffee in a cafe in Paris, tilt down
+A panda drinking coffee in a cafe in Paris, with an intense shaking effect
+A panda drinking coffee in a cafe in Paris, featuring a steady and smooth perspective
+A panda drinking coffee in a cafe in Paris, racking focus
+A cute happy Corgi playing in park, sunset, in super slow motion
+A cute happy Corgi playing in park, sunset, zoom in
+A cute happy Corgi playing in park, sunset, zoom out
+A cute happy Corgi playing in park, sunset, pan left
+A cute happy Corgi playing in park, sunset, pan right
+A cute happy Corgi playing in park, sunset, tilt up
+A cute happy Corgi playing in park, sunset, tilt down
+A cute happy Corgi playing in park, sunset, with an intense shaking effect
+A cute happy Corgi playing in park, sunset, featuring a steady and smooth perspective
+A cute happy Corgi playing in park, sunset, racking focus
+Gwen Stacy reading a book, in super slow motion
+Gwen Stacy reading a book, zoom in
+Gwen Stacy reading a book, zoom out
+Gwen Stacy reading a book, pan left
+Gwen Stacy reading a book, pan right
+Gwen Stacy reading a book, tilt up
+Gwen Stacy reading a book, tilt down
+Gwen Stacy reading a book, with an intense shaking effect
+Gwen Stacy reading a book, featuring a steady and smooth perspective
+Gwen Stacy reading a book, racking focus
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, in super slow motion
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, zoom in
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, zoom out
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pan left
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, pan right
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, tilt up
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, tilt down
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, with an intense shaking effect
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, featuring a steady and smooth perspective
+A boat sailing leisurely along the Seine River with the Eiffel Tower in background, racking focus
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, in super slow motion
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, zoom in
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, zoom out
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pan left
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, pan right
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, tilt up
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, tilt down
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, with an intense shaking effect
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, featuring a steady and smooth perspective
+A couple in formal evening wear going home get caught in a heavy downpour with umbrellas, racking focus
+An astronaut flying in space, in super slow motion
+An astronaut flying in space, zoom in
+An astronaut flying in space, zoom out
+An astronaut flying in space, pan left
+An astronaut flying in space, pan right
+An astronaut flying in space, tilt up
+An astronaut flying in space, tilt down
+An astronaut flying in space, with an intense shaking effect
+An astronaut flying in space, featuring a steady and smooth perspective
+An astronaut flying in space, racking focus
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, in super slow motion
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, zoom in
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, zoom out
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pan left
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, pan right
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, tilt up
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, tilt down
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, with an intense shaking effect
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, featuring a steady and smooth perspective
+Snow rocky mountains peaks canyon. snow blanketed rocky mountains surround and shadow deep canyons. the canyons twist and bend through the high elevated mountain peaks, racking focus
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_id.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_id.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9085aa0034c05cc60e40b1f14be1bb4a2a171d2f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_id.txt
@@ -0,0 +1,8 @@
+207
+360
+387
+974
+88
+979
+417
+279
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_labels.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_labels.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6493fdbf907465063a2cee904fe3994a90d420cd
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/imagenet_labels.txt
@@ -0,0 +1,8 @@
+golden retriever
+otter
+lesser panda
+geyser
+macaw
+valley
+balloon
+golden panda
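The two asset files above look like parallel lists for class-conditional sampling: line k of imagenet_id.txt holds an ImageNet class index and line k of imagenet_labels.txt a human-readable name for the same class (207 is indeed "golden retriever", 360 "otter", and so on). A minimal reading sketch follows, assuming that line-by-line pairing; the paths match the files added in this diff, but the pairing itself is inferred rather than documented here.

    # Sketch: load the two asset files as parallel (class_id, label) pairs.
    # Assumes one entry per line and identical ordering in both files.
    with open("assets/texts/imagenet_id.txt") as f_ids, \
         open("assets/texts/imagenet_labels.txt") as f_labels:
        ids = [int(line) for line in f_ids if line.strip()]
        labels = [line.strip() for line in f_labels if line.strip()]
    pairs = list(zip(ids, labels))  # e.g. (207, "golden retriever")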
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/rand_types.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/rand_types.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bd4b5d85796ac735df1a5762a2aebecf7d1d7135
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/rand_types.txt
@@ -0,0 +1,40 @@
+随机电影镜头
+随机电影镜头
+随机电影镜头
+随机电影镜头
+随机电影镜头
+随机任务镜头
+随机任务镜头
+随机任务镜头
+随机任务镜头
+随机任务镜头
+随机游戏镜头
+随机游戏镜头
+随机游戏镜头
+随机游戏镜头
+随机游戏镜头
+随机开车镜头
+随机开车镜头
+随机开车镜头
+随机开车镜头
+随机开车镜头
+随机动物镜头
+随机动物镜头
+随机动物镜头
+随机动物镜头
+随机动物镜头
+随机森林镜头
+随机森林镜头
+随机森林镜头
+随机森林镜头
+随机森林镜头
+随机动漫镜头
+随机动漫镜头
+随机动漫镜头
+随机动漫镜头
+随机动漫镜头
+随机舞蹈镜头
+随机舞蹈镜头
+随机舞蹈镜头
+随机舞蹈镜头
+随机舞蹈镜头
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_samples.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_samples.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9b729527cee2d4da1d28415e42c52c6627217d10
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_samples.txt
@@ -0,0 +1,8 @@
+A small cactus with a happy face in the Sahara desert.
+Bright scene, aerial view, ancient city, fantasy, gorgeous light, mirror reflection, high detail, wide angle lens.
+Nature vs human nature, surreal, UHD, 8k, hyper details, rich colors, photograph.
+Poster of a mechanical cat, technical schematics viewed from front.
+Luffy from ONEPIECE, handsome face, fantasy.
+Real beautiful woman.
+An alpaca made of colorful building blocks, cyberpunk.
+artistic
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_sigma.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_sigma.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fa0f0a4cc7007348558acf7763bbf9b67b127380
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2i_sigma.txt
@@ -0,0 +1,10 @@
+Eiffel Tower was made up of more than 2 million translucent straws to look like a cloud, with the bell tower at the top of the building, Michel installed huge foam-making machines in the forest to blow huge amounts of unpredictable wet clouds in the building's classic architecture.
+A gorgeously rendered papercraft world of a coral reef, rife with colorful fish and sea creatures.
+Full body shot, a French woman, Photography, French Streets background, backlighting, rim light, Fujifilm.
+Close-up photos of models, hazy light and shadow, laser metal hair accessories, soft and beautiful, light gold pupils, white eyelashes, low saturation, real skin details, clear pores and fine lines, light reflection and refraction, ultra-clear, cinematography, award-winning works.
+A litter of golden retriever puppies playing in the snow. Their heads pop out of the snow, covered in.
+Lego model, future rocket station, intricate details, high resolution, unreal engine, UHD
+One giant, sharp, metal square mirror in the center of the frame, four young people in the foreground, background sunny palm oil plantation, tropical, realistic style, photography, nostalgic, green tone, mysterious, dreamy, bright color.
+Modern luxury contemporary luxury home interiors house, in the style of mimicking ruined materials, ray tracing, haunting houses, and stone, capture the essence of nature, gray and bronze, dynamic outdoor shots.
+Over the shoulder game perspective, game screen of Diablo 4, Inside the gorgeous palace is the wet ground, The necromancer knelt before the king, and a horde of skeletons he summoned stood at his side, cinematic light.
+A curvy timber house near a sea, designed by Zaha Hadid, represent the image of a cold, modern architecture, at night, white lighting, highly detailed.
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_car.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_car.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f9bd226fa90d7554b73ba8d7011a25afa970eadc
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_car.txt
@@ -0,0 +1 @@
+|0|A car driving in the forest.|2|A car driving in the desert.|4|A car driving near the coast.|6|A car driving in the city.|8|A car driving near a mountain.|10|A car driving on the surface of a river.|12|A car driving on the surface of the earth.|14|A car driving in the universe.{"reference_path": "https://cdn.openai.com/tmp/s/interp/d0.mp4", "mask_strategy": "0,0,0,0,16,0.4"}
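The single record in t2v_car.txt packs three things into one line: alternating |index|prompt| segments that switch the text prompt over the course of generation, followed by a JSON tail giving a reference video path and a mask strategy. A minimal parsing sketch is shown below, assuming exactly this layout (a leading |, integer indices, prompts free of | and {, and the JSON object appended directly after the last prompt); it illustrates the format rather than reproducing Open-Sora's own loader.

    import json

    def parse_prompt_record(line: str):
        """Split a t2v_car-style record into (index, prompt) pairs plus a JSON tail."""
        line = line.strip()
        extra = {}
        brace = line.find("{")            # optional JSON tail starts at the first '{'
        if brace != -1:
            extra = json.loads(line[brace:])
            line = line[:brace]
        parts = [p for p in line.split("|") if p]
        # even positions hold the indices, odd positions the prompts that start there
        return [(int(parts[i]), parts[i + 1]) for i in range(0, len(parts), 2)], extra

For the record above this yields eight (index, prompt) pairs, from (0, "A car driving in the forest.") to (14, "A car driving in the universe."), together with the reference_path and mask_strategy fields.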
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_latte.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_latte.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a61359ca41325db817d8eb2c6a255d997f0382ca
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_latte.txt
@@ -0,0 +1,7 @@
+Yellow and black tropical fish dart through the sea.
+An epic tornado attacking above a glowing city at night.
+Slow pan upward of blazing oak fire in an indoor fireplace.
+a cat wearing sunglasses and working as a lifeguard at a pool.
+Sunset over the sea.
+A dog in an astronaut suit and sunglasses floating in space.
+An astronaut flying in space, 4k, high resolution
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_pllava.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_pllava.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f0ab0f5bbdaefc5fa881a6eba51b5bb5c21f6943
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_pllava.txt
@@ -0,0 +1,10 @@
+a close-up shot of a woman standing in a room with a white wall and a plant on the left side. the woman has curly hair and is wearing a green tank top. she is looking to the side with a neutral expression on her face. the lighting in the room is soft and appears to be natural, coming from the left side of the frame. the focus is on the woman, with the background being out of focus. there are no texts or other objects in the video. the style of the video is a simple, candid portrait with a shallow depth of field.
+a serene scene of a pond filled with water lilies. the water is a deep blue, providing a striking contrast to the pink and white flowers that float on its surface. the flowers, in full bloom, are the main focus of the video. they are scattered across the pond, with some closer to the camera and others further away, creating a sense of depth. the pond is surrounded by lush greenery, adding a touch of nature to the scene. the video is taken from a low angle, looking up at the flowers, which gives a unique perspective and emphasizes their beauty. the overall composition of the video suggests a peaceful and tranquil setting, likely a garden or a park.
+a professional setting where a woman is presenting a slide from a presentation. she is standing in front of a projector screen, which displays a bar chart. the chart is colorful, with bars of different heights, indicating some sort of data comparison. the woman is holding a pointer, which she uses to highlight specific parts of the chart. she is dressed in a white blouse and black pants, and her hair is styled in a bun. the room has a modern design, with a sleek black floor and a white ceiling. the lighting is bright, illuminating the woman and the projector screen. the focus of the image is on the woman and the projector screen, with the background being out of focus. there are no texts visible in the image. the relative positions of the objects suggest that the woman is the main subject of the image, and the projector screen is the object of her attention. the image does not provide any information about the content of the presentation or the context of the meeting.
+a bustling city street from the perspective of a car. the car, a sleek black sedan, is in motion, driving down the street. the dashboard of the car is visible in the foreground, providing a view of the road ahead. the street is lined with parked cars on both sides, their colors muted in the bright sunlight. buildings rise on either side of the street, their windows reflecting the sunlight. the sky above is a clear blue, and the sun is shining brightly, casting a warm glow on the scene. the street is busy with pedestrians and other vehicles, adding to the dynamic nature of the scene. the video does not contain any text. the relative positions of the objects suggest a typical city street scene with the car in the foreground, the parked cars on either side, and the buildings in the background. the sunlight illuminates the scene, highlighting the colors and details of the objects. the pedestrians and other vehicles are in motion, adding a sense of life and activity to the scene. the buildings provide a sense of depth and scale to the image. the video does not contain any text or countable objects. the
+a serene scene in a park. the sun is shining brightly, casting a warm glow on the lush green trees and the grassy field. the camera is positioned low, looking up at the towering trees, which are the main focus of the image. the trees are dense and full of leaves, creating a canopy of green that fills the frame. the sunlight filters through the leaves, creating a beautiful pattern of light and shadow on the ground. the overall atmosphere of the video is peaceful and tranquil, evoking a sense of calm and relaxation.
+a moment in a movie theater. a couple is seated in the middle of the theater, engrossed in the movie they are watching. the man is dressed in a casual outfit, complete with a pair of sunglasses, while the woman is wearing a cozy sweater. they are seated on a red theater seat, which stands out against the dark surroundings. the theater itself is dimly lit, with the screen displaying the movie they are watching. the couple appears to be enjoying the movie, their attention completely absorbed by the on-screen action. the theater is mostly empty, with only a few other seats visible in the background. the video does not contain any text or additional objects. the relative positions of the objects are such that the couple is in the foreground, while the screen and the other seats are in the background. the focus of the video is clearly on the couple and their shared experience of watching a movie in a theater.
+a scene where a person is examining a dog. the person is wearing a blue shirt with the word "volunteer" printed on it. the dog is lying on its side, and the person is using a stethoscope to listen to the dog's heartbeat. the dog appears to be a golden retriever and is looking directly at the camera. the background is blurred, but it seems to be an indoor setting with a white wall. the person's focus is on the dog, and they seem to be checking its health. the dog's expression is calm, and it seems to be comfortable with the person's touch. the overall atmosphere of the video is calm and professional.
+a close-up shot of a woman applying makeup. she is using a black brush to apply a dark powder to her face. the woman has blonde hair and is wearing a black top. the background is black, which contrasts with her skin tone and the makeup. the focus is on her face and the brush, with the rest of her body and the background being out of focus. the lighting is soft and even, highlighting the texture of the makeup and the woman's skin. there are no texts or other objects in the video. the woman's expression is neutral, and she is looking directly at the camera. the video does not contain any action, as it is a still shot of a woman applying makeup. the relative position of the woman and the brush is such that the brush is in her hand and is being used to apply the makeup to her face. the video does not contain any other objects or actions. the woman is the only person in the video, and she is the main subject. the video does not contain any sound. the description is based on the visible content of the video and does not include any assumptions or interpretations.
+a young woman is seated in a black gaming chair in a room filled with computer monitors and other gaming equipment. she is wearing a red tank top and black pants, and her hair is styled in loose waves. the room is dimly lit, with the glow of the monitors casting a soft light on her face. she is holding a black game controller in her hands, and her attention is focused on the screen in front of her. the room is filled with other gaming equipment, including keyboards and mice, and there are other chairs and desks scattered around the room. the woman appears to be engrossed in her game, her posture relaxed yet focused. the room is quiet, the only sound coming from the beeps and boops of the game. the woman is the only person in the room, adding a sense of solitude to the scene. the video does not contain any text. the relative positions of the objects suggest a well-organized gaming setup, with the woman at the center, surrounded by her gaming equipment. the video does not contain any action, but the woman's focused expression suggests that she is in the middle of an intense g
+a breathtaking aerial view of a coastal landscape at sunset. the sky, painted in hues of orange and pink, serves as a stunning backdrop to the scene. the sun, partially obscured by the horizon, casts a warm glow on the landscape below. the foreground of the image is dominated by a rocky cliff, its rugged surface adding a touch of raw beauty to the scene. the cliff's edge is adorned with patches of green vegetation, providing a stark contrast to the otherwise barren landscape. the middle ground of the image reveals a winding road that hugs the coastline. the road, appearing as a thin line against the vast expanse of the landscape, guides the viewer's eye towards the horizon. in the background, the silhouette of mountains can be seen, their peaks shrouded in a light mist. the mountains, along with the road, add depth to the image, creating a sense of distance and scale. overall, the video presents a serene and majestic coastal landscape, captured at the perfect moment of sunset. the colors
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_ref.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_ref.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c0debe5c177192ae57e65f06fc1bc7491ff08b27
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_ref.txt
@@ -0,0 +1,6 @@
+Drone view of waves crashing against the rugged cliffs along Big Sur’s garay point beach. The crashing blue waters create white-tipped waves, while the golden light of the setting sun illuminates the rocky shore. A small island with a lighthouse sits in the distance, and green shrubbery covers the cliff’s edge. The steep drop from the road down to the beach is a dramatic feat, with the cliff's edges jutting out over the sea. This is a view that captures the raw beauty of the coast and the rugged landscape of the Pacific Coast Highway.
+In an ornate, historical hall, a massive tidal wave peaks and begins to crash. Two surfers, seizing the moment, skillfully navigate the face of the wave.
+Pirate ship in a cosmic maelstrom nebula.
+Drone view of waves crashing against the rugged cliffs along Big Sur’s garay point beach. The crashing blue waters create white-tipped waves, while the golden light of the setting sun illuminates the rocky shore. A small island with a lighthouse sits in the distance, and green shrubbery covers the cliff’s edge. The steep drop from the road down to the beach is a dramatic feat, with the cliff's edges jutting out over the sea. This is a view that captures the raw beauty of the coast and the rugged landscape of the Pacific Coast Highway.
+A sad small cactus in the Sahara desert becomes happy.
+A car driving on a road in the middle of a desert.
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_samples.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_samples.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7953f3752f0a9648bbb716c1cea9cbf99f5237b4
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_samples.txt
@@ -0,0 +1,10 @@
+A soaring drone footage captures the majestic beauty of a coastal cliff, its red and yellow stratified rock faces rich in color and against the vibrant turquoise of the sea. Seabirds can be seen taking flight around the cliff's precipices. As the drone slowly moves from different angles, the changing sunlight casts shifting shadows that highlight the rugged textures of the cliff and the surrounding calm sea. The water gently laps at the rock base and the greenery that clings to the top of the cliff, and the scene gives a sense of peaceful isolation at the fringes of the ocean. The video captures the essence of pristine natural beauty untouched by human structures.
+A majestic beauty of a waterfall cascading down a cliff into a serene lake. The waterfall, with its powerful flow, is the central focus of the video. The surrounding landscape is lush and green, with trees and foliage adding to the natural beauty of the scene. The camera angle provides a bird's eye view of the waterfall, allowing viewers to appreciate the full height and grandeur of the waterfall. The video is a stunning representation of nature's power and beauty.
+A vibrant scene of a snowy mountain landscape. The sky is filled with a multitude of colorful hot air balloons, each floating at different heights, creating a dynamic and lively atmosphere. The balloons are scattered across the sky, some closer to the viewer, others further away, adding depth to the scene. Below, the mountainous terrain is blanketed in a thick layer of snow, with a few patches of bare earth visible here and there. The snow-covered mountains provide a stark contrast to the colorful balloons, enhancing the visual appeal of the scene. In the foreground, a few cars can be seen driving along a winding road that cuts through the mountains. The cars are small compared to the vastness of the landscape, emphasizing the grandeur of the surroundings. The overall style of the video is a mix of adventure and tranquility, with the hot air balloons adding a touch of whimsy to the otherwise serene mountain landscape. The video is likely shot during the day, as the lighting is bright and even, casting soft shadows on the snow-covered mountains.
+The vibrant beauty of a sunflower field. The sunflowers, with their bright yellow petals and dark brown centers, are in full bloom, creating a stunning contrast against the green leaves and stems. The sunflowers are arranged in neat rows, creating a sense of order and symmetry. The sun is shining brightly, casting a warm glow on the flowers and highlighting their intricate details. The video is shot from a low angle, looking up at the sunflowers, which adds a sense of grandeur and awe to the scene. The sunflowers are the main focus of the video, with no other objects or people present. The video is a celebration of nature's beauty and the simple joy of a sunny day in the countryside.
+A serene underwater scene featuring a sea turtle swimming through a coral reef. The turtle, with its greenish-brown shell, is the main focus of the video, swimming gracefully towards the right side of the frame. The coral reef, teeming with life, is visible in the background, providing a vibrant and colorful backdrop to the turtle's journey. Several small fish, darting around the turtle, add a sense of movement and dynamism to the scene. The video is shot from a slightly elevated angle, providing a comprehensive view of the turtle's surroundings. The overall style of the video is calm and peaceful, capturing the beauty and tranquility of the underwater world.
+A vibrant underwater scene. A group of blue fish, with yellow fins, are swimming around a coral reef. The coral reef is a mix of brown and green, providing a natural habitat for the fish. The water is a deep blue, indicating a depth of around 30 feet. The fish are swimming in a circular pattern around the coral reef, indicating a sense of motion and activity. The overall scene is a beautiful representation of marine life.
+A bustling city street at night, filled with the glow of car headlights and the ambient light of streetlights. The scene is a blur of motion, with cars speeding by and pedestrians navigating the crosswalks. The cityscape is a mix of towering buildings and illuminated signs, creating a vibrant and dynamic atmosphere. The perspective of the video is from a high angle, providing a bird's eye view of the street and its surroundings. The overall style of the video is dynamic and energetic, capturing the essence of urban life at night.
+A snowy forest landscape with a dirt road running through it. The road is flanked by trees covered in snow, and the ground is also covered in snow. The sun is shining, creating a bright and serene atmosphere. The road appears to be empty, and there are no people or animals visible in the video. The style of the video is a natural landscape shot, with a focus on the beauty of the snowy forest and the peacefulness of the road.
+The dynamic movement of tall, wispy grasses swaying in the wind. The sky above is filled with clouds, creating a dramatic backdrop. The sunlight pierces through the clouds, casting a warm glow on the scene. The grasses are a mix of green and brown, indicating a change in seasons. The overall style of the video is naturalistic, capturing the beauty of the landscape in a realistic manner. The focus is on the grasses and their movement, with the sky serving as a secondary element. The video does not contain any human or animal elements.
+A serene night scene in a forested area. The first frame shows a tranquil lake reflecting the star-filled sky above. The second frame reveals a beautiful sunset, casting a warm glow over the landscape. The third frame showcases the night sky, filled with stars and a vibrant Milky Way galaxy. The video is a time-lapse, capturing the transition from day to night, with the lake and forest serving as a constant backdrop. The style of the video is naturalistic, emphasizing the beauty of the night sky and the peacefulness of the forest.
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_short.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_short.txt
new file mode 100644
index 0000000000000000000000000000000000000000..002864828da40e46dc4e19d7d580ccedfa1be7d9
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_short.txt
@@ -0,0 +1,20 @@
+A fat rabbit wearing a purple robe walking through a fantasy landscape
+Waves crashing against a lone lighthouse, ominous lighting
+A mystical forest showcasing the adventures of travelers who enter
+A blue-haired mage singing
+A surreal landscape with floating islands and waterfalls in the sky craft
+A blue bird standing in water
+A young man walks alone by the seaside
+Pink rose on a glass surface with droplets, close-up
+Drone viewpoint, a subway train coming out of a tunnel
+Space with all planets green and pink color with background of bright white stars
+A city floating in an astral space, with stars and nebulae
+Sunrise on top of a high-rise building
+Pink and cyan powder explosions
+Deer in the woods gaze into the camera under the sunlight
+In a flash of lightning, a wizard appeared from thin air, his long robes billowing in the wind
+A futuristic cyberpunk cityscape at night with towering neon-lit skyscrapers
+A scene where the trees, flowers, and animals come together to create a symphony of nature
+A ghostly ship sailing through the clouds, navigating through a sea under a moonlit sky
+A sunset with a beautiful beach
+A young man walking alone in the forest
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_sora.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_sora.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eeb887b1863e590e45054fb766694c1275cee987
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/t2v_sora.txt
@@ -0,0 +1,48 @@
+A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse. She wears sunglasses and red lipstick. She walks confidently and casually. The street is damp and reflective, creating a mirror effect of the colorful lights. Many pedestrians walk about.
+Several giant wooly mammoths approach treading through a snowy meadow, their long wooly fur lightly blows in the wind as they walk, snow covered trees and dramatic snow capped mountains in the distance, mid afternoon light with wispy clouds and a sun high in the distance creates a warm glow, the low camera view is stunning capturing the large furry mammal with beautiful photography, depth of field.
+A movie trailer featuring the adventures of the 30 year old space man wearing a red wool knitted motorcycle helmet, blue sky, salt desert, cinematic style, shot on 35mm film, vivid colors.
+Drone view of waves crashing against the rugged cliffs along Big Sur’s garay point beach. The crashing blue waters create white-tipped waves, while the golden light of the setting sun illuminates the rocky shore. A small island with a lighthouse sits in the distance, and green shrubbery covers the cliff’s edge. The steep drop from the road down to the beach is a dramatic feat, with the cliff’s edges jutting out over the sea. This is a view that captures the raw beauty of the coast and the rugged landscape of the Pacific Coast Highway.
+Animated scene features a close-up of a short fluffy monster kneeling beside a melting red candle. The art style is 3D and realistic, with a focus on lighting and texture. The mood of the painting is one of wonder and curiosity, as the monster gazes at the flame with wide eyes and open mouth. Its pose and expression convey a sense of innocence and playfulness, as if it is exploring the world around it for the first time. The use of warm colors and dramatic lighting further enhances the cozy atmosphere of the image.
+A gorgeously rendered papercraft world of a coral reef, rife with colorful fish and sea creatures.
+This close-up shot of a Victoria crowned pigeon showcases its striking blue plumage and red chest. Its crest is made of delicate, lacy feathers, while its eye is a striking red color. The bird’s head is tilted slightly to the side, giving the impression of it looking regal and majestic. The background is blurred, drawing attention to the bird’s striking appearance.
+Photorealistic closeup video of two pirate ships battling each other as they sail inside a cup of coffee.
+A young man at his 20s is sitting on a piece of cloud in the sky, reading a book.
+Historical footage of California during the gold rush.
+A close up view of a glass sphere that has a zen garden within it. There is a small dwarf in the sphere who is raking the zen garden and creating patterns in the sand.
+Extreme close up of a 24 year old woman’s eye blinking, standing in Marrakech during magic hour, cinematic film shot in 70mm, depth of field, vivid colors, cinematic
+A cartoon kangaroo disco dances.
+A beautiful homemade video showing the people of Lagos, Nigeria in the year 2056. Shot with a mobile phone camera.
+A petri dish with a bamboo forest growing within it that has tiny red pandas running around.
+The camera rotates around a large stack of vintage televisions all showing different programs — 1950s sci-fi movies, horror movies, news, static, a 1970s sitcom, etc, set inside a large New York museum gallery.
+3D animation of a small, round, fluffy creature with big, expressive eyes explores a vibrant, enchanted forest. The creature, a whimsical blend of a rabbit and a squirrel, has soft blue fur and a bushy, striped tail. It hops along a sparkling stream, its eyes wide with wonder. The forest is alive with magical elements: flowers that glow and change colors, trees with leaves in shades of purple and silver, and small floating lights that resemble fireflies. The creature stops to interact playfully with a group of tiny, fairy-like beings dancing around a mushroom ring. The creature looks up in awe at a large, glowing tree that seems to be the heart of the forest.
+The camera follows behind a white vintage SUV with a black roof rack as it speeds up a steep dirt road surrounded by pine trees on a steep mountain slope, dust kicks up from it’s tires, the sunlight shines on the SUV as it speeds along the dirt road, casting a warm glow over the scene. The dirt road curves gently into the distance, with no other cars or vehicles in sight. The trees on either side of the road are redwoods, with patches of greenery scattered throughout. The car is seen from the rear following the curve with ease, making it seem as if it is on a rugged drive through the rugged terrain. The dirt road itself is surrounded by steep hills and mountains, with a clear blue sky above with wispy clouds.
+Reflections in the window of a train traveling through the Tokyo suburbs.
+A drone camera circles around a beautiful historic church built on a rocky outcropping along the Amalfi Coast, the view showcases historic and magnificent architectural details and tiered pathways and patios, waves are seen crashing against the rocks below as the view overlooks the horizon of the coastal waters and hilly landscapes of the Amalfi Coast Italy, several distant people are seen walking and enjoying vistas on patios of the dramatic ocean views, the warm glow of the afternoon sun creates a magical and romantic feeling to the scene, the view is stunning captured with beautiful photography.
+A large orange octopus is seen resting on the bottom of the ocean floor, blending in with the sandy and rocky terrain. Its tentacles are spread out around its body, and its eyes are closed. The octopus is unaware of a king crab that is crawling towards it from behind a rock, its claws raised and ready to attack. The crab is brown and spiny, with long legs and antennae. The scene is captured from a wide angle, showing the vastness and depth of the ocean. The water is clear and blue, with rays of sunlight filtering through. The shot is sharp and crisp, with a high dynamic range. The octopus and the crab are in focus, while the background is slightly blurred, creating a depth of field effect.
+A flock of paper airplanes flutters through a dense jungle, weaving around trees as if they were migrating birds.
+A cat waking up its sleeping owner demanding breakfast. The owner tries to ignore the cat, but the cat tries new tactics and finally the owner pulls out a secret stash of treats from under the pillow to hold the cat off a little longer.
+Borneo wildlife on the Kinabatangan River
+A Chinese Lunar New Year celebration video with Chinese Dragon.
+Tour of an art gallery with many beautiful works of art in different styles.
+Beautiful, snowy Tokyo city is bustling. The camera moves through the bustling city street, following several people enjoying the beautiful snowy weather and shopping at nearby stalls. Gorgeous sakura petals are flying through the wind along with snowflakes.
+A stop motion animation of a flower growing out of the windowsill of a suburban house.
+The story of a robot’s life in a cyberpunk setting.
+An extreme close-up of an gray-haired man with a beard in his 60s, he is deep in thought pondering the history of the universe as he sits at a cafe in Paris, his eyes focus on people offscreen as they walk as he sits mostly motionless, he is dressed in a wool coat suit coat with a button-down shirt , he wears a brown beret and glasses and has a very professorial appearance, and the end he offers a subtle closed-mouth smile as if he found the answer to the mystery of life, the lighting is very cinematic with the golden light and the Parisian streets and city in the background, depth of field, cinematic 35mm film.
+A beautiful silhouette animation shows a wolf howling at the moon, feeling lonely, until it finds its pack.
+New York City submerged like Atlantis. Fish, whales, sea turtles and sharks swim through the streets of New York.
+A litter of golden retriever puppies playing in the snow. Their heads pop out of the snow, covered in.
+Step-printing scene of a person running, cinematic film shot in 35mm.
+Five gray wolf pups frolicking and chasing each other around a remote gravel road, surrounded by grass. The pups run and leap, chasing each other, and nipping at each other, playing.
+Basketball through hoop then explodes.
+Archeologists discover a generic plastic chair in the desert, excavating and dusting it with great care.
+A grandmother with neatly combed grey hair stands behind a colorful birthday cake with numerous candles at a wood dining room table, expression is one of pure joy and happiness, with a happy glow in her eye. She leans forward and blows out the candles with a gentle puff, the cake has pink frosting and sprinkles and the candles cease to flicker, the grandmother wears a light blue blouse adorned with floral patterns, several happy friends and family sitting at the table can be seen celebrating, out of focus. The scene is beautifully captured, cinematic, showing a 3/4 view of the grandmother and the dining room. Warm color tones and soft lighting enhance the mood.
+The camera directly faces colorful buildings in Burano Italy. An adorable dalmation looks through a window on a building on the ground floor. Many people are walking and cycling along the canal streets in front of the buildings.
+An adorable happy otter confidently stands on a surfboard wearing a yellow lifejacket, riding along turquoise tropical waters near lush tropical islands, 3D digital render art style.
+This close-up shot of a chameleon showcases its striking color changing capabilities. The background is blurred, drawing attention to the animal’s striking appearance.
+A corgi vlogging itself in tropical Maui.
+A white and orange tabby cat is seen happily darting through a dense garden, as if chasing something. Its eyes are wide and happy as it jogs forward, scanning the branches, flowers, and leaves as it walks. The path is narrow as it makes its way between all the plants. the scene is captured from a ground-level angle, following the cat closely, giving a low and intimate perspective. The image is cinematic with warm tones and a grainy texture. The scattered daylight between the leaves and plants above creates a warm contrast, accentuating the cat’s orange fur. The shot is clear and sharp, with a shallow depth of field.
+Aerial view of Santorini during the blue hour, showcasing the stunning architecture of white Cycladic buildings with blue domes. The caldera views are breathtaking, and the lighting creates a beautiful, serene atmosphere.
+Tiltshift of a construction site filled with workers, equipment, and heavy machinery.
+A giant, towering cloud in the shape of a man looms over the earth. The cloud man shoots lighting bolts down to the earth.
+A Samoyed and a Golden Retriever dog are playfully romping through a futuristic neon city at night. The neon lights emitted from the nearby buildings glistens off of their fur.
+The Glenfinnan Viaduct is a historic railway bridge in Scotland, UK, that crosses over the west highland line between the towns of Mallaig and Fort William. It is a stunning sight as a steam train leaves the bridge, traveling over the arch-covered viaduct. The landscape is dotted with lush greenery and rocky mountains, creating a picturesque backdrop for the train journey. The sky is blue and the sun is shining, making for a beautiful day to explore this majestic spot.
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_id.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_id.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e8371f00609f33a59378dd2f6bb4385a7df8bd63
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_id.txt
@@ -0,0 +1,6 @@
+0
+1
+2
+3
+4
+5
diff --git a/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_labels.txt b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_labels.txt
new file mode 100644
index 0000000000000000000000000000000000000000..264dbfd8837a4b89b81d05b06c48b567dfa1d150
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_labels.txt
@@ -0,0 +1,6 @@
+Apply Eye Makeup
+Apply Lipstick
+Archery
+Baby Crawling
+Balance Beam
+Band Marching
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..44818fe095f5f16f960d5e7d0c7f974076aaeaa7
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/16x256x256.py
@@ -0,0 +1,31 @@
+num_frames = 16
+fps = 8
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="DiT-XL/2",
+ condition="text",
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="dpm-solver",
+ num_sampling_steps=20,
+ cfg_scale=4.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/ucf101_labels.txt"
+save_dir = "./samples/samples/"
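The inference configs added above are plain Python modules rather than YAML, so any loader that executes the file and collects its module-level names can read them. As a minimal sketch (not the repository's actual loader, which may use its own Config class), the snippet below loads the DiT 16x256x256 inference config with `runpy` and inspects a few settings; the path is the file from this diff.

```python
import runpy

def load_py_config(path: str) -> dict:
    """Execute a plain-Python config file and keep its top-level settings."""
    namespace = runpy.run_path(path)
    # Drop dunder entries so only the declared config names remain.
    return {k: v for k, v in namespace.items() if not k.startswith("__")}

if __name__ == "__main__":
    cfg = load_py_config(
        "exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/16x256x256.py"
    )
    print(cfg["model"]["type"])           # "DiT-XL/2"
    print(cfg["scheduler"])               # dpm-solver, 20 steps, cfg_scale 4.0
    print(cfg["num_frames"], cfg["fps"])  # 16, 8
```

Because the file is executed, the nested `dict(...)` calls become ordinary dictionaries, which is why downstream code can index `cfg["model"]["type"]` directly.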
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256-class.py b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256-class.py
new file mode 100644
index 0000000000000000000000000000000000000000..bebaa11e286db0ea7968723909482e18f28a12c3
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256-class.py
@@ -0,0 +1,31 @@
+num_frames = 1
+fps = 1
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="DiT-XL/2",
+ no_temporal_pos_emb=True,
+ condition="label_1000",
+ from_pretrained="DiT-XL-2-256x256.pt",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="classes",
+ num_classes=1000,
+)
+scheduler = dict(
+ type="dpm-solver",
+ num_sampling_steps=20,
+ cfg_scale=4.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/imagenet_id.txt"
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7cb9a2d20e6ae3a19e468f493f0e125cbb0a33f
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/inference/1x256x256.py
@@ -0,0 +1,32 @@
+num_frames = 1
+fps = 1
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="DiT-XL/2",
+ no_temporal_pos_emb=True,
+ condition="text",
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="dpm-solver",
+ num_sampling_steps=20,
+ cfg_scale=4.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/imagenet_labels.txt"
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36e06b65f577d1faa1231886273167d2a611926
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/16x256x256.py
@@ -0,0 +1,50 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="DiT-XL/2",
+ from_pretrained="DiT-XL-2-256x256.pt",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/1x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/1x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa5d478d00584cef2188a578048ff0b3dd6990ba
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/dit/train/1x256x256.py
@@ -0,0 +1,51 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=1,
+ frame_interval=1,
+ image_size=(256, 256),
+ transform_name="center",
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = False
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="DiT-XL/2",
+ no_temporal_pos_emb=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 128
+lr = 1e-4 # according to DiT repo
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256-class.py b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256-class.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ccf6d43604240e724f0e78f2de3aefa85449277
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256-class.py
@@ -0,0 +1,30 @@
+num_frames = 16
+fps = 8
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="Latte-XL/2",
+ condition="label_101",
+ from_pretrained="Latte-XL-2-256x256-ucf101.pt",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="classes",
+ num_classes=101,
+)
+scheduler = dict(
+ type="dpm-solver",
+ num_sampling_steps=20,
+ cfg_scale=4.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/ucf101_id.txt"
+save_dir = "./samples/samples/"
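This class-conditional Latte config reads numeric class ids from `ucf101_id.txt` (added earlier in this diff) and conditions a `classes` text encoder with 101 categories; the human-readable names live in `ucf101_labels.txt`. A hedged sketch of how the two files line up, simply zipping the files shown above (the repository's own id-to-label handling may differ):

```python
from pathlib import Path

def read_lines(path: str) -> list[str]:
    """Return non-empty, stripped lines of a small text asset."""
    return [line.strip() for line in Path(path).read_text().splitlines() if line.strip()]

ids = read_lines("exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_id.txt")
labels = read_lines("exp_code/1_benchmark/Open-Sora_v12/assets/texts/ucf101_labels.txt")

# Pair each numeric id with its label; both files list six entries in this diff.
id_to_label = {int(i): name for i, name in zip(ids, labels)}
print(id_to_label[2])  # "Archery"
```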
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bdd58fad5f81bcca29c2d975fd2dd89a4bf7c58
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/inference/16x256x256.py
@@ -0,0 +1,31 @@
+num_frames = 16
+fps = 8
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="Latte-XL/2",
+ condition="text",
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="dpm-solver",
+ num_sampling_steps=20,
+ cfg_scale=4.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/ucf101_labels.txt"
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/latte/train/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/train/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..e087f8a99638a5d5036af94d5b6cecc80a867bc3
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/latte/train/16x256x256.py
@@ -0,0 +1,49 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="Latte-XL/2",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="clip",
+ from_pretrained="openai/clip-vit-base-patch32",
+ model_max_length=77,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample-ref.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample-ref.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae80774f3c6d675347f9952cfd8fc8ae02820526
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample-ref.py
@@ -0,0 +1,64 @@
+num_frames = 16
+frame_interval = 3
+fps = 24
+image_size = (240, 426)
+multi_resolution = "STDiT2"
+
+# Condition
+prompt_path = None
+prompt = [
+ 'Drone view of waves crashing against the rugged cliffs along Big Sur\'s garay point beach. {"reference_path": "assets/images/condition/cliff.png", "mask_strategy": "0"}',
+ 'A breathtaking sunrise scene.{"reference_path": "assets/images/condition/sunset1.png","mask_strategy": "0"}',
+ 'A car driving on the ocean.{"reference_path": "https://cdn.openai.com/tmp/s/interp/d0.mp4","mask_strategy": "0,0,-8,0,8"}',
+ 'A snowy forest.{"reference_path": "https://cdn.pixabay.com/video/2021/04/25/72171-542991404_large.mp4","mask_strategy": "0,0,0,0,15,0.8"}',
+ 'A breathtaking sunrise scene.{"reference_path": "assets/images/condition/sunset1.png;assets/images/condition/sunset2.png","mask_strategy": "0;0,1,0,-1,1"}',
+ '|0|a white jeep equipped with a roof rack driving on a dirt road in a coniferous forest.|2|a white jeep equipped with a roof rack driving on a dirt road in the desert.|4|a white jeep equipped with a roof rack driving on a dirt road in a mountain.|6|A white jeep equipped with a roof rack driving on a dirt road in a city.|8|a white jeep equipped with a roof rack driving on a dirt road on the surface of a river.|10|a white jeep equipped with a roof rack driving on a dirt road under the lake.|12|a white jeep equipped with a roof rack flying into the sky.|14|a white jeep equipped with a roof rack driving in the universe. Earth is the background.{"reference_path": "https://cdn.openai.com/tmp/s/interp/d0.mp4", "mask_strategy": "0,0,0,0,15"}',
+]
+
+loop = 2
+condition_frame_length = 4
+# (
+# loop id, [the loop index of the condition image or video]
+# reference id, [the index of the condition image or video in the reference_path]
+# reference start, [the start frame of the condition image or video]
+# target start, [the location to insert]
+# length, [the number of frames to insert]
+# edit_ratio [the edit rate of the condition image or video]
+# )
+# See https://github.com/hpcaitech/Open-Sora/blob/main/docs/config.md#advanced-inference-config for more details
+# See https://github.com/hpcaitech/Open-Sora/blob/main/docs/commands.md#inference-with-open-sora-11 for more examples
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained="hpcai-tech/OpenSora-STDiT-v2-stage3",
+ input_sq_size=512,
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ cache_dir=None, # "/mnt/hdd/cached_models",
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ cache_dir=None, # "/mnt/hdd/cached_models",
+ model_max_length=200,
+)
+scheduler = dict(
+ type="iddpm",
+ num_sampling_steps=100,
+ cfg_scale=7.0,
+ cfg_channel=3, # or None
+)
+dtype = "bf16"
+
+# Others
+batch_size = 1
+seed = 42
+save_dir = "./samples/samples/"
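The comment block in `sample-ref.py` documents the `mask_strategy` string as up to six comma-separated fields per reference (loop id, reference id, reference start, target start, length, edit ratio), with `;` separating multiple references. The sketch below parses such a string into that structure; the defaults used for omitted trailing fields are placeholders for illustration, not necessarily Open-Sora's defaults (see the linked docs/config.md for the authoritative semantics).

```python
from dataclasses import dataclass

@dataclass
class MaskStrategy:
    loop_id: int = 0         # which loop the condition applies to
    reference_id: int = 0    # index into the ";"-separated reference_path
    reference_start: int = 0
    target_start: int = 0
    length: int = 1          # number of frames to insert (placeholder default)
    edit_ratio: float = 0.0  # placeholder default

def parse_mask_strategy(spec: str) -> list[MaskStrategy]:
    """Parse strings such as "0,0,-8,0,8" or "0;0,1,0,-1,1" (illustrative sketch)."""
    strategies = []
    for group in spec.split(";"):
        values = [float(f) for f in group.split(",") if f != ""]
        ints = [int(v) for v in values[:5]]
        kwargs = dict(zip(
            ["loop_id", "reference_id", "reference_start", "target_start", "length"],
            ints,
        ))
        if len(values) > 5:
            kwargs["edit_ratio"] = values[5]
        strategies.append(MaskStrategy(**kwargs))
    return strategies

print(parse_mask_strategy("0,0,-8,0,8"))
print(parse_mask_strategy("0;0,1,0,-1,1"))
```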
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2800466c67caaa30b889ee6977e57a08ae5dbe9
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/inference/sample.py
@@ -0,0 +1,44 @@
+num_frames = 16
+frame_interval = 3
+fps = 24
+image_size = (240, 426)
+multi_resolution = "STDiT2"
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained="hpcai-tech/OpenSora-STDiT-v2-stage3",
+ input_sq_size=512,
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ cache_dir=None, # "/mnt/hdd/cached_models",
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ cache_dir=None, # "/mnt/hdd/cached_models",
+ model_max_length=200,
+)
+scheduler = dict(
+ type="iddpm",
+ num_sampling_steps=100,
+ cfg_scale=7.0,
+ cfg_channel=3, # or None
+)
+dtype = "bf16"
+
+# Condition
+prompt_path = "./assets/texts/t2v_samples.txt"
+prompt = None # prompt has higher priority than prompt_path
+
+# Others
+batch_size = 1
+seed = 42
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/benchmark.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..da6cc0633af7f81316d52c53321700907e91ec2c
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/benchmark.py
@@ -0,0 +1,102 @@
+# this file is only for batch size search and is not used for training
+
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+
+# bucket config format:
+# 1. { resolution: {num_frames: (prob, batch_size)} }, in this case batch_size is ignored when searching
+# 2. { resolution: {num_frames: (prob, (max_batch_size, ))} }, batch_size is searched in the range [batch_size_start, max_batch_size), batch_size_start is configured via CLI
+# 3. { resolution: {num_frames: (prob, (min_batch_size, max_batch_size))} }, batch_size is searched in the range [min_batch_size, max_batch_size)
+# 4. { resolution: {num_frames: (prob, (min_batch_size, max_batch_size, step_size))} }, batch_size is searched in the range [min_batch_size, max_batch_size) with step_size (grid search)
+# 5. { resolution: {num_frames: (0.0, None)} }, this bucket will not be used
+
+bucket_config = {
+ # == manual search ==
+ # "240p": {128: (1.0, 2)}, # 4.28s/it
+ # "240p": {64: (1.0, 4)},
+ # "240p": {32: (1.0, 8)}, # 4.6s/it
+ # "240p": {16: (1.0, 16)}, # 4.6s/it
+ # "480p": {16: (1.0, 4)}, # 4.6s/it
+ # "720p": {16: (1.0, 2)}, # 5.89s/it
+ # "256": {1: (1.0, 256)}, # 4.5s/it
+ # "512": {1: (1.0, 96)}, # 4.7s/it
+ # "512": {1: (1.0, 128)}, # 6.3s/it
+ # "480p": {1: (1.0, 50)}, # 4.0s/it
+ # "1024": {1: (1.0, 32)}, # 6.8s/it
+ # "1024": {1: (1.0, 20)}, # 4.3s/it
+ # "1080p": {1: (1.0, 16)}, # 8.6s/it
+ # "1080p": {1: (1.0, 8)}, # 4.4s/it
+ # == stage 2 ==
+ # "240p": {
+ # 16: (1.0, (2, 32)),
+ # 32: (1.0, (2, 16)),
+ # 64: (1.0, (2, 8)),
+ # 128: (1.0, (2, 6)),
+ # },
+ # "256": {1: (1.0, (128, 300))},
+ # "512": {1: (0.5, (64, 128))},
+ # "480p": {1: (0.4, (32, 128)), 16: (0.4, (2, 32)), 32: (0.0, None)},
+ # "720p": {16: (0.1, (2, 16)), 32: (0.0, None)}, # No examples now
+ # "1024": {1: (0.3, (8, 64))},
+ # "1080p": {1: (0.3, (2, 32))},
+ # == stage 3 ==
+ "720p": {1: (20, 40), 32: (0.5, (2, 4)), 64: (0.5, (1, 1))},
+}
+
+
+# Define acceleration
+num_workers = 4
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = None
+lr = 2e-5
+grad_clip = 1.0
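The comment block at the top of `benchmark.py` lists five accepted shapes for each `{num_frames: (prob, batch_size_spec)}` bucket entry used during batch-size search. The sketch below expands one such entry into the candidate batch sizes it describes, with `batch_size_start` standing in for the CLI-configured lower bound mentioned in the comment; the repository's actual search routine may differ.

```python
def candidate_batch_sizes(spec, batch_size_start: int = 1) -> list[int]:
    """Expand a (prob, batch_size_spec) bucket entry into batch sizes to try (sketch)."""
    prob, bs = spec
    if prob == 0.0 or bs is None:      # format 5: bucket disabled
        return []
    if isinstance(bs, int):            # format 1: fixed batch size (ignored when searching)
        return [bs]
    if len(bs) == 1:                   # format 2: (max_batch_size,)
        return list(range(batch_size_start, bs[0]))
    if len(bs) == 2:                   # format 3: (min_batch_size, max_batch_size)
        return list(range(bs[0], bs[1]))
    min_bs, max_bs, step = bs          # format 4: (min, max, step) grid search
    return list(range(min_bs, max_bs, step))

print(candidate_batch_sizes((1.0, 2)))        # [2]
print(candidate_batch_sizes((1.0, (2, 8))))   # [2, 3, 4, 5, 6, 7]
print(candidate_batch_sizes((0.0, None)))     # []
```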
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee43c2ee3a57855a3d443995dda9fd7a07cd7f69
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image.py
@@ -0,0 +1,66 @@
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+bucket_config = { # 6s/it
+ "256": {1: (1.0, 256)},
+ "512": {1: (1.0, 80)},
+ "480p": {1: (1.0, 52)},
+ "1024": {1: (1.0, 20)},
+ "1080p": {1: (1.0, 8)},
+}
+
+# Define acceleration
+num_workers = 4
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = 10 # only for logging
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image_rflow.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image_rflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..08d52efb47ca3400546186bfe060ee5d9b6327a8
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/image_rflow.py
@@ -0,0 +1,88 @@
+# Define dataset
+# dataset = dict(
+# type="VariableVideoTextDataset",
+# data_path=None,
+# num_frames=None,
+# frame_interval=3,
+# image_size=(None, None),
+# transform_name="resize_crop",
+# )
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=1,
+ frame_interval=1,
+ image_size=(256, 256),
+ transform_name="center",
+)
+bucket_config = { # 6s/it
+ "256": {1: (1.0, 256)},
+ "512": {1: (1.0, 80)},
+ "480p": {1: (1.0, 52)},
+ "1024": {1: (1.0, 20)},
+ "1080p": {1: (1.0, 8)},
+}
+
+# Define acceleration
+num_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+# model = dict(
+# type="DiT-XL/2",
+# from_pretrained="/home/zhaowangbo/wangbo/PixArt-alpha/pretrained_models/PixArt-XL-2-512x512.pth",
+# # input_sq_size=512, # pretrained model is trained on 512x512
+# enable_flash_attn=True,
+# enable_layernorm_kernel=True,
+# )
+model = dict(
+ type="PixArt-XL/2",
+ space_scale=1.0,
+ time_scale=1.0,
+ no_temporal_pos_emb=True,
+ from_pretrained="PixArt-XL-2-512x512.pth",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+# model = dict(
+# type="DiT-XL/2",
+# # space_scale=1.0,
+# # time_scale=1.0,
+# no_temporal_pos_emb=True,
+# # from_pretrained="PixArt-XL-2-512x512.pth",
+# from_pretrained="/home/zhaowangbo/wangbo/PixArt-alpha/pretrained_models/PixArt-XL-2-512x512.pth",
+# enable_flash_attn=True,
+# enable_layernorm_kernel=True,
+# )
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ # timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 10
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = 100 # only for logging
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage1.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage1.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfba99666cf5991630a39e6f73895defea95ca17
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage1.py
@@ -0,0 +1,78 @@
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+# IMG: 1024 (20%) 512 (30%) 256 (50%) drop (50%)
+bucket_config = { # 1s/it
+ "144p": {1: (0.5, 48), 16: (1.0, 6), 32: (1.0, 3), 96: (1.0, 1)},
+ "256": {1: (0.5, 24), 16: (0.5, 3), 48: (0.5, 1), 64: (0.0, None)},
+ "240p": {16: (0.3, 2), 32: (0.3, 1), 64: (0.0, None)},
+ "512": {1: (0.4, 12)},
+ "1024": {1: (0.3, 3)},
+}
+mask_ratios = {
+ "identity": 0.75,
+ "quarter_random": 0.025,
+ "quarter_head": 0.025,
+ "quarter_tail": 0.025,
+ "quarter_head_tail": 0.05,
+ "image_random": 0.025,
+ "image_head": 0.025,
+ "image_tail": 0.025,
+ "image_head_tail": 0.05,
+}
+
+# Define acceleration
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = False
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = None
+lr = 2e-5
+grad_clip = 1.0
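The `mask_ratios` dict in this stage-1 config assigns a probability to each masking strategy, with `identity` meaning no masking; the values sum to 1.0. A small sketch that checks the sum and draws a strategy per sample follows; the repository's own mask generator is the authority here, this is only an illustration of the weights.

```python
import math
import random

mask_ratios = {
    "identity": 0.75,
    "quarter_random": 0.025,
    "quarter_head": 0.025,
    "quarter_tail": 0.025,
    "quarter_head_tail": 0.05,
    "image_random": 0.025,
    "image_head": 0.025,
    "image_tail": 0.025,
    "image_head_tail": 0.05,
}

# The probabilities should cover the whole sample space.
assert math.isclose(sum(mask_ratios.values()), 1.0)

def sample_mask_type(rng: random.Random) -> str:
    """Draw one mask strategy according to the configured weights."""
    names, weights = zip(*mask_ratios.items())
    return rng.choices(names, weights=weights, k=1)[0]

rng = random.Random(42)
print(sample_mask_type(rng))  # "identity" about 75% of the time
```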
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage2.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage2.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce884aa8099d3590dd8770db763c42624810cf81
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage2.py
@@ -0,0 +1,80 @@
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+bucket_config = { # 7s/it
+ "144p": {1: (1.0, 48), 16: (1.0, 17), 32: (1.0, 9), 64: (1.0, 4), 128: (1.0, 1)},
+ "256": {1: (0.8, 254), 16: (0.5, 17), 32: (0.5, 9), 64: (0.5, 4), 128: (0.5, 1)},
+ "240p": {1: (0.1, 20), 16: (0.9, 17), 32: (0.8, 9), 64: (0.8, 4), 128: (0.8, 2)},
+ "512": {1: (0.5, 86), 16: (0.2, 4), 32: (0.2, 2), 64: (0.2, 1), 128: (0.0, None)},
+ "480p": {1: (0.4, 54), 16: (0.4, 4), 32: (0.0, None)},
+ "720p": {1: (0.1, 20), 16: (0.1, 2), 32: (0.0, None)},
+ "1024": {1: (0.3, 20)},
+ "1080p": {1: (0.4, 8)},
+}
+mask_ratios = {
+ "identity": 0.75,
+ "quarter_random": 0.025,
+ "quarter_head": 0.025,
+ "quarter_tail": 0.025,
+ "quarter_head_tail": 0.05,
+ "image_random": 0.025,
+ "image_head": 0.025,
+ "image_tail": 0.025,
+ "image_head_tail": 0.05,
+}
+
+# Define acceleration
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = None
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage3.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage3.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc8a6bbc632889c8ef05e32cb8a6001f61bf14
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/stage3.py
@@ -0,0 +1,80 @@
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+bucket_config = { # 13s/it
+ "144p": {1: (1.0, 200), 16: (1.0, 36), 32: (1.0, 18), 64: (1.0, 9), 128: (1.0, 4)},
+ "256": {1: (0.8, 200), 16: (0.5, 22), 32: (0.5, 11), 64: (0.5, 6), 128: (0.8, 4)},
+ "240p": {1: (0.8, 200), 16: (0.5, 22), 32: (0.5, 10), 64: (0.5, 6), 128: (0.5, 3)},
+ "360p": {1: (0.5, 120), 16: (0.5, 9), 32: (0.5, 4), 64: (0.5, 2), 128: (0.5, 1)},
+ "512": {1: (0.5, 120), 16: (0.5, 9), 32: (0.5, 4), 64: (0.5, 2), 128: (0.8, 1)},
+ "480p": {1: (0.4, 80), 16: (0.6, 6), 32: (0.6, 3), 64: (0.6, 1), 128: (0.0, None)},
+ "720p": {1: (0.4, 40), 16: (0.6, 3), 32: (0.6, 1), 96: (0.0, None)},
+ "1024": {1: (0.3, 40)},
+}
+mask_ratios = {
+ "identity": 0.75,
+ "quarter_random": 0.025,
+ "quarter_head": 0.025,
+ "quarter_tail": 0.025,
+ "quarter_head_tail": 0.05,
+ "image_random": 0.025,
+ "image_head": 0.025,
+ "image_tail": 0.025,
+ "image_head_tail": 0.05,
+}
+
+# Define acceleration
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = None
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/video.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/video.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a068a53ecf0897e0ab7d3f32e18392b7d4ab16d
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-1/train/video.py
@@ -0,0 +1,68 @@
+# Define dataset
+dataset = dict(
+ type="VariableVideoTextDataset",
+ data_path=None,
+ num_frames=None,
+ frame_interval=3,
+ image_size=(None, None),
+ transform_name="resize_crop",
+)
+bucket_config = { # 6s/it
+ "240p": {16: (1.0, 16), 32: (1.0, 8), 64: (1.0, 4), 128: (1.0, 2)},
+ "256": {1: (1.0, 256)},
+ "512": {1: (0.5, 80)},
+ "480p": {1: (0.4, 52), 16: (0.4, 4), 32: (0.0, None)},
+ "720p": {16: (0.1, 2), 32: (0.0, None)}, # No examples now
+ "1024": {1: (0.3, 20)},
+ "1080p": {1: (0.3, 8)},
+}
+
+# Define acceleration
+num_workers = 4
+num_bucket_build_workers = 16
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT2-XL/2",
+ from_pretrained=None,
+ input_sq_size=512, # pretrained model is trained on 512x512
+ qk_norm=True,
+ qk_norm_legacy=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=200,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = 10 # only for logging
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e2c623ea181de445a4ee89e865e2ff134e47461
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample.py
@@ -0,0 +1,42 @@
+resolution = "240p"
+aspect_ratio = "9:16"
+num_frames = 51
+fps = 24
+frame_interval = 1
+save_fps = 24
+
+save_dir = "./samples/samples/"
+seed = 42
+batch_size = 1
+multi_resolution = "STDiT2"
+dtype = "bf16"
+condition_frame_length = 5
+align = 5
+
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained="hpcai-tech/OpenSora-STDiT-v3",
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ num_sampling_steps=30,
+ cfg_scale=7.0,
+)
+
+aes = 6.5
+flow = None
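The v1.2 VAE in this sampling config uses `micro_frame_size=17`, so a 51-frame clip is encoded in three temporal chunks, and `micro_batch_size=4` similarly splits the batch dimension. The arithmetic below sketches only that bookkeeping; the real chunking lives inside `OpenSoraVAE_V1_2`.

```python
import math

num_frames = 51        # from the sampling config above
micro_frame_size = 17  # temporal chunk the VAE processes at once

def temporal_chunks(n_frames: int, chunk: int) -> list[tuple[int, int]]:
    """Return (start, end) frame ranges processed per VAE pass (sketch only)."""
    return [(s, min(s + chunk, n_frames)) for s in range(0, n_frames, chunk)]

print(temporal_chunks(num_frames, micro_frame_size))   # [(0, 17), (17, 34), (34, 51)]
print(math.ceil(num_frames / micro_frame_size))        # 3 passes through the VAE
```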
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample_hf.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e8425124440d35eea36ab2938296b0d302248e8
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/inference/sample_hf.py
@@ -0,0 +1,44 @@
+resolution = "240p"
+aspect_ratio = "9:16"
+num_frames = 51
+fps = 24
+frame_interval = 1
+save_fps = 24
+
+save_dir = "./samples/samples/"
+seed = 42
+batch_size = 1
+multi_resolution = "STDiT2"
+dtype = "bf16"
+condition_frame_length = 5
+align = 5
+
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained="hpcai-tech/OpenSora-STDiT-v3",
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ force_huggingface=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+ force_huggingface=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ num_sampling_steps=30,
+ cfg_scale=7.0,
+)
+
+aes = 6.5
+flow = None
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/bs.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/bs.py
new file mode 100644
index 0000000000000000000000000000000000000000..8af6d667707a9c9f16e134268811910f7ad3ea94
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/bs.py
@@ -0,0 +1,117 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# == Config 1: Webvid ==
+# base: (512, 408), 12s/it
+grad_checkpoint = True
+base = ("512", "408")
+base_step_time = 12
+bucket_config = {
+ "144p": {
+ 1: (475, 0),
+ 51: (51, 0),
+ 102: (27, 0),
+ 204: (13, 0),
+ 408: (6, 0),
+ },
+ # ---
+ "240p": {
+ 1: (297, 200), # 8.25
+ 51: (20, 0),
+ 102: (10, 0),
+ 204: (5, 0),
+ 408: (2, 0),
+ },
+ # ---
+ "512": {
+ 1: (141, 0),
+ 51: (8, 0),
+ 102: (4, 0),
+ 204: (2, 0),
+ 408: (1, 0),
+ },
+ # ---
+ "480p": {
+ 1: (89, 0),
+ 51: (5, 0),
+ 102: (2, 0),
+ 204: (1, 0),
+ },
+ # ---
+ "1024": {
+ 1: (36, 0),
+ 51: (1, 0),
+ },
+ # ---
+ "1080p": {1: (5, 0)},
+ # ---
+ "2048": {1: (5, 0)},
+}
+
+# == Config 2 ==
+# base: (512, 408), 16s/it
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+mask_ratios = {
+ "random": 0.2,
+ "intepolate": 0.01,
+ "quarter_random": 0.01,
+ "quarter_head": 0.01,
+ "quarter_tail": 0.01,
+ "quarter_head_tail": 0.01,
+ "image_random": 0.05,
+ "image_head": 0.1,
+ "image_tail": 0.05,
+ "image_head_tail": 0.05,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 2e-4
+ema_decay = 0.99
+adam_eps = 1e-15
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/eval_loss.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/eval_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..75c65dbe92f44805ad9757d5de2d161aa082f348
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/eval_loss.py
@@ -0,0 +1,49 @@
+num_workers = 8
+dtype = "bf16"
+seed = 42
+num_eval_timesteps = 10
+
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+bucket_config = {
+ "144p": {1: (None, 100), 51: (None, 30), 102: (None, 20), 204: (None, 8), 408: (None, 4)},
+ # ---
+ "240p": {1: (None, 100), 51: (None, 24), 102: (None, 12), 204: (None, 4), 408: (None, 2)},
+ # ---
+ "360p": {1: (None, 60), 51: (None, 12), 102: (None, 6), 204: (None, 2), 408: (None, 1)},
+ # ---
+ "480p": {1: (None, 40), 51: (None, 6), 102: (None, 3), 204: (None, 1)},
+ # ---
+ "720p": {1: (None, 20), 51: (None, 2), 102: (None, 1)},
+ # ---
+ "1080p": {1: (None, 10)},
+ # ---
+ "2048": {1: (None, 5)},
+}
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+ local_files_only=True,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ local_files_only=True,
+)
+scheduler = dict(type="rflow")
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/extract.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6be2f69f5c949794171f9e95ed3f6dcf4d6717
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/extract.py
@@ -0,0 +1,62 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# webvid
+bucket_config = { # 12s/it
+ "144p": {1: (1.0, 475), 51: (1.0, 51), 102: ((1.0, 0.33), 27), 204: ((1.0, 0.1), 13), 408: ((1.0, 0.1), 6)},
+ # ---
+ "256": {1: (0.4, 297), 51: (0.5, 20), 102: ((0.5, 0.33), 10), 204: ((0.5, 0.1), 5), 408: ((0.5, 0.1), 2)},
+ "240p": {1: (0.3, 297), 51: (0.4, 20), 102: ((0.4, 0.33), 10), 204: ((0.4, 0.1), 5), 408: ((0.4, 0.1), 2)},
+ # ---
+ "360p": {1: (0.2, 141), 51: (0.15, 8), 102: ((0.15, 0.33), 4), 204: ((0.15, 0.1), 2), 408: ((0.15, 0.1), 1)},
+ "512": {1: (0.1, 141)},
+ # ---
+ "480p": {1: (0.1, 89)},
+ # ---
+ "720p": {1: (0.05, 36)},
+ "1024": {1: (0.05, 36)},
+ # ---
+ "1080p": {1: (0.1, 5)},
+ # ---
+ "2048": {1: (0.1, 5)},
+}
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+seed = 42
+outputs = "outputs"
+wandb = False
+
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained="hpcai-tech/OpenSora-STDiT-v3",
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=32,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+ local_files_only=True,
+)
+
+# feature extraction settings
+save_text_features = True
+save_compressed_text_features = True
+bin_size = 250 # 1GB, 4195 bins
+log_time = False
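`extract.py` saves text features in bins of `bin_size = 250` samples (the comment estimates roughly 1 GB per bin and 4195 bins for the full dataset). A hedged sketch of the index-to-bin bookkeeping such a layout implies; the file naming below is invented for illustration and the repository's actual scheme may differ.

```python
bin_size = 250  # samples per saved feature file, as in the config above

def bin_of(sample_idx: int) -> tuple[int, int]:
    """Return (bin index, offset within the bin) for a dataset sample."""
    return sample_idx // bin_size, sample_idx % bin_size

def bin_path(sample_idx: int) -> str:
    """Hypothetical file name for the bin holding this sample."""
    bin_idx, _ = bin_of(sample_idx)
    return f"text_features_{bin_idx:05d}.pt"

print(bin_of(1234))    # (4, 234)
print(bin_path(1234))  # text_features_00004.pt
# ~4195 bins corresponds to roughly 4195 * 250 ≈ 1.05M samples.
```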
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/feat.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/feat.py
new file mode 100644
index 0000000000000000000000000000000000000000..416919480a81cbed56b45befecd3be6648e72796
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/misc/feat.py
@@ -0,0 +1,94 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+ dummy_text_feature=True,
+)
+
+# webvid
+bucket_config = { # 12s/it
+ "144p": {1: (1.0, 475), 51: (1.0, 51), 102: ((1.0, 0.33), 27), 204: ((1.0, 0.1), 13), 408: ((1.0, 0.1), 6)},
+ # ---
+ "256": {1: (0.4, 297), 51: (0.5, 20), 102: ((0.5, 0.33), 10), 204: ((0.5, 0.1), 5), 408: ((0.5, 0.1), 2)},
+ "240p": {1: (0.3, 297), 51: (0.4, 20), 102: ((0.4, 0.33), 10), 204: ((0.4, 0.1), 5), 408: ((0.4, 0.1), 2)},
+ # ---
+ "360p": {1: (0.2, 141), 51: (0.15, 8), 102: ((0.15, 0.33), 4), 204: ((0.15, 0.1), 2), 408: ((0.15, 0.1), 1)},
+ "512": {1: (0.1, 141)},
+ # ---
+ "480p": {1: (0.1, 89)},
+ # ---
+ "720p": {1: (0.05, 36)},
+ "1024": {1: (0.05, 36)},
+ # ---
+ "1080p": {1: (0.1, 5)},
+ # ---
+ "2048": {1: (0.1, 5)},
+}
+
+grad_checkpoint = True
+
+load_text_features = True
+
+# Acceleration settings
+num_workers = 0
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+ skip_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+ local_files_only=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+mask_ratios = {
+ "random": 0.2,
+ "intepolate": 0.01,
+ "quarter_random": 0.01,
+ "quarter_head": 0.01,
+ "quarter_tail": 0.01,
+ "quarter_head_tail": 0.01,
+ "image_random": 0.05,
+ "image_head": 0.1,
+ "image_tail": 0.05,
+ "image_head_tail": 0.05,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 1
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 2e-4
+ema_decay = 0.99
+adam_eps = 1e-15
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/adapt.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/adapt.py
new file mode 100644
index 0000000000000000000000000000000000000000..b88eee8a26f4fa750560f259784a6f9442a52e83
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/adapt.py
@@ -0,0 +1,83 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+bucket_config = { # 2s/it
+ "144p": {1: (0.5, 48), 34: (1.0, 2), 51: (1.0, 4), 102: (1.0, 2), 204: (1.0, 1)},
+ # ---
+ "256": {1: (0.6, 20), 34: (0.5, 2), 51: (0.5, 1), 68: (0.5, 1), 136: (0.0, None)},
+ "240p": {1: (0.6, 20), 34: (0.5, 2), 51: (0.5, 1), 68: (0.5, 1), 136: (0.0, None)},
+ # ---
+ "360p": {1: (0.5, 8), 34: (0.2, 1), 102: (0.0, None)},
+ "512": {1: (0.5, 8), 34: (0.2, 1), 102: (0.0, None)},
+ # ---
+ "480p": {1: (0.2, 4), 17: (0.3, 1), 68: (0.0, None)},
+ # ---
+ "720p": {1: (0.1, 2)},
+ "1024": {1: (0.1, 2)},
+ # ---
+ "1080p": {1: (0.1, 1)},
+}
+grad_checkpoint = False
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+mask_ratios = {
+ "random": 0.2,
+ "intepolate": 0.01,
+ "quarter_random": 0.01,
+ "quarter_head": 0.01,
+ "quarter_tail": 0.01,
+ "quarter_head_tail": 0.01,
+ "image_random": 0.05,
+ "image_head": 0.1,
+ "image_tail": 0.05,
+ "image_head_tail": 0.05,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_360p.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_360p.py
new file mode 100644
index 0000000000000000000000000000000000000000..f49a00e38de3a231976ce508711e582342cc2895
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_360p.py
@@ -0,0 +1,58 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# webvid
+bucket_config = {"360p": {102: (1.0, 1)}}
+grad_checkpoint = True
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 200
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
+warmup_steps = 1000
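The demo configs use `lr = 1e-4` with `warmup_steps = 1000`. Assuming a plain linear warmup (the exact schedule is defined inside the training script, so treat this as an illustration), the learning rate ramps as follows:

```python
base_lr = 1e-4
warmup_steps = 1000  # from the config above

def warmup_lr(step: int) -> float:
    """Linear warmup sketch: ramp from ~0 to base_lr over warmup_steps."""
    if step >= warmup_steps:
        return base_lr
    return base_lr * (step + 1) / warmup_steps

for step in (0, 250, 999, 1000, 5000):
    print(step, f"{warmup_lr(step):.2e}")
```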
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_480p.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_480p.py
new file mode 100644
index 0000000000000000000000000000000000000000..08121c7b667da75eadcf988b30959c29a28aa963
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/demo_480p.py
@@ -0,0 +1,58 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# webvid
+bucket_config = {"480p": {51: (0.5, 5)}}
+grad_checkpoint = True
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 200
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
+warmup_steps = 1000
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1f1e7c06ea41d234476d459226e0092203de634
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1.py
@@ -0,0 +1,110 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# backup
+# bucket_config = { # 20s/it
+# "144p": {1: (1.0, 100), 51: (1.0, 30), 102: (1.0, 20), 204: (1.0, 8), 408: (1.0, 4)},
+# # ---
+# "256": {1: (0.5, 100), 51: (0.3, 24), 102: (0.3, 12), 204: (0.3, 4), 408: (0.3, 2)},
+# "240p": {1: (0.5, 100), 51: (0.3, 24), 102: (0.3, 12), 204: (0.3, 4), 408: (0.3, 2)},
+# # ---
+# "360p": {1: (0.5, 60), 51: (0.3, 12), 102: (0.3, 6), 204: (0.3, 2), 408: (0.3, 1)},
+# "512": {1: (0.5, 60), 51: (0.3, 12), 102: (0.3, 6), 204: (0.3, 2), 408: (0.3, 1)},
+# # ---
+# "480p": {1: (0.5, 40), 51: (0.3, 6), 102: (0.3, 3), 204: (0.3, 1), 408: (0.0, None)},
+# # ---
+# "720p": {1: (0.2, 20), 51: (0.3, 2), 102: (0.3, 1), 204: (0.0, None)},
+# "1024": {1: (0.1, 20), 51: (0.3, 2), 102: (0.3, 1), 204: (0.0, None)},
+# # ---
+# "1080p": {1: (0.1, 10)},
+# # ---
+# "2048": {1: (0.1, 5)},
+# }
+
+# webvid
+bucket_config = { # 12s/it
+ "144p": {1: (1.0, 475), 51: (1.0, 51), 102: ((1.0, 0.33), 27), 204: ((1.0, 0.1), 13), 408: ((1.0, 0.1), 6)},
+ # ---
+ "256": {1: (0.4, 297), 51: (0.5, 20), 102: ((0.5, 0.33), 10), 204: ((0.5, 0.1), 5), 408: ((0.5, 0.1), 2)},
+ "240p": {1: (0.3, 297), 51: (0.4, 20), 102: ((0.4, 0.33), 10), 204: ((0.4, 0.1), 5), 408: ((0.4, 0.1), 2)},
+ # ---
+ "360p": {1: (0.2, 141), 51: (0.15, 8), 102: ((0.15, 0.33), 4), 204: ((0.15, 0.1), 2), 408: ((0.15, 0.1), 1)},
+ "512": {1: (0.1, 141)},
+ # ---
+ "480p": {1: (0.1, 89)},
+ # ---
+ "720p": {1: (0.05, 36)},
+ "1024": {1: (0.05, 36)},
+ # ---
+ "1080p": {1: (0.1, 5)},
+ # ---
+ "2048": {1: (0.1, 5)},
+}
+
+grad_checkpoint = True
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+mask_ratios = {
+ "random": 0.05,
+ "intepolate": 0.005,
+ "quarter_random": 0.005,
+ "quarter_head": 0.005,
+ "quarter_tail": 0.005,
+ "quarter_head_tail": 0.005,
+ "image_random": 0.025,
+ "image_head": 0.05,
+ "image_tail": 0.025,
+ "image_head_tail": 0.025,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 200
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
+warmup_steps = 1000
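The stage1 mask_ratios sum to 0.2, so roughly 20% of samples receive one of the listed conditioning masks and the rest train unmasked. The real mask generator lives in Open-Sora's training utilities; the sketch below reimplements only the weighted choice, and the "none" fallback name is an assumption of this sketch:

import random

def sample_mask_type(mask_ratios: dict) -> str:
    # Weighted draw over the configured mask types; leftover probability
    # mass is treated here as "no mask" (an assumption, not Open-Sora's name).
    assert sum(mask_ratios.values()) <= 1.0, "mask ratios must not exceed 1"
    r = random.random()
    acc = 0.0
    for name, p in mask_ratios.items():
        acc += p
        if r < acc:
            return name
    return "none"

print(sample_mask_type({"random": 0.05, "image_head": 0.05}))  # usually "none"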
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1_feat.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1_feat.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0414fc8c5218ad6847275e585bbb0985d0fa841
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage1_feat.py
@@ -0,0 +1,59 @@
+# Dataset settings
+dataset = dict(type="BatchFeatureDataset")
+grad_checkpoint = True
+num_workers = 4
+
+# Acceleration settings
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+ skip_y_embedder=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+vae_out_channels = 4
+model_max_length = 300
+text_encoder_output_dim = 4096
+load_video_features = True
+load_text_features = True
+
+# Mask settings
+mask_ratios = {
+ "random": 0.2,
+ "intepolate": 0.01,
+ "quarter_random": 0.01,
+ "quarter_head": 0.01,
+ "quarter_tail": 0.01,
+ "quarter_head_tail": 0.01,
+ "image_random": 0.05,
+ "image_head": 0.1,
+ "image_tail": 0.05,
+ "image_head_tail": 0.05,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 2e-4
+ema_decay = 0.99
+adam_eps = 1e-15
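stage1_feat.py trains from precomputed features (load_video_features / load_text_features), which is why no vae or text_encoder dict is configured; vae_out_channels = 4 and text_encoder_output_dim = 4096 describe the cached tensors instead. The shapes below show what one cached sample would plausibly look like; the compressed frame count and spatial size are assumptions about the VAE, not values stated in this config:

import torch

# Illustrative shapes for one cached sample (T', H', W' are assumed sizes):
video_latent = torch.randn(4, 13, 32, 32)    # (vae_out_channels, T', H', W')
text_embedding = torch.randn(300, 4096)      # (model_max_length, text_encoder_output_dim)
text_mask = torch.ones(300, dtype=torch.bool)
print(video_latent.shape, text_embedding.shape, text_mask.shape)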
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage2.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage2.py
new file mode 100644
index 0000000000000000000000000000000000000000..86200662ce75680b8b708fd35c0fb775e09ad2f3
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage2.py
@@ -0,0 +1,91 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# webvid
+bucket_config = { # 12s/it
+ "144p": {1: (1.0, 475), 51: (1.0, 51), 102: ((1.0, 0.33), 27), 204: ((1.0, 0.1), 13), 408: ((1.0, 0.1), 6)},
+ # ---
+ "256": {1: (0.4, 297), 51: (0.5, 20), 102: ((0.5, 0.33), 10), 204: ((0.5, 1.0), 5), 408: ((0.5, 1.0), 2)},
+ "240p": {1: (0.3, 297), 51: (0.4, 20), 102: ((0.4, 0.33), 10), 204: ((0.4, 1.0), 5), 408: ((0.4, 1.0), 2)},
+ # ---
+ "360p": {1: (0.5, 141), 51: (0.15, 8), 102: ((0.3, 0.5), 4), 204: ((0.3, 1.0), 2), 408: ((0.5, 0.5), 1)},
+ "512": {1: (0.4, 141), 51: (0.15, 8), 102: ((0.2, 0.4), 4), 204: ((0.2, 1.0), 2), 408: ((0.4, 0.5), 1)},
+ # ---
+ "480p": {1: (0.5, 89), 51: (0.2, 5), 102: (0.2, 2), 204: (0.1, 1)},
+ # ---
+ "720p": {1: (0.1, 36), 51: (0.03, 1)},
+ "1024": {1: (0.1, 36), 51: (0.02, 1)},
+ # ---
+ "1080p": {1: (0.01, 5)},
+ # ---
+ "2048": {1: (0.01, 5)},
+}
+
+grad_checkpoint = True
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+# 25%
+mask_ratios = {
+ "random": 0.005,
+ "intepolate": 0.002,
+ "quarter_random": 0.007,
+ "quarter_head": 0.002,
+ "quarter_tail": 0.002,
+ "quarter_head_tail": 0.002,
+ "image_random": 0.0,
+ "image_head": 0.22,
+ "image_tail": 0.005,
+ "image_head_tail": 0.005,
+}
+
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 200
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage3.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage3.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c53c3853cff35095fb8d58d4fae96b16d3d9009
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora-v1-2/train/stage3.py
@@ -0,0 +1,91 @@
+# Dataset settings
+dataset = dict(
+ type="VariableVideoTextDataset",
+ transform_name="resize_crop",
+)
+
+# webvid
+bucket_config = { # 20s/it
+ "144p": {1: (1.0, 475), 51: (1.0, 51), 102: (1.0, 27), 204: (1.0, 13), 408: (1.0, 6)},
+ # ---
+ "256": {1: (1.0, 297), 51: (0.5, 20), 102: (0.5, 10), 204: (0.5, 5), 408: ((0.5, 0.5), 2)},
+ "240p": {1: (1.0, 297), 51: (0.5, 20), 102: (0.5, 10), 204: (0.5, 5), 408: ((0.5, 0.4), 2)},
+ # ---
+ "360p": {1: (1.0, 141), 51: (0.5, 8), 102: (0.5, 4), 204: (0.5, 2), 408: ((0.5, 0.3), 1)},
+ "512": {1: (1.0, 141), 51: (0.5, 8), 102: (0.5, 4), 204: (0.5, 2), 408: ((0.5, 0.2), 1)},
+ # ---
+ "480p": {1: (1.0, 89), 51: (0.5, 5), 102: (0.5, 3), 204: ((0.5, 0.5), 1), 408: (0.0, None)},
+ # ---
+ "720p": {1: (0.3, 36), 51: (0.2, 2), 102: (0.1, 1), 204: (0.0, None)},
+ "1024": {1: (0.3, 36), 51: (0.1, 2), 102: (0.1, 1), 204: (0.0, None)},
+ # ---
+ "1080p": {1: (0.1, 5)},
+ # ---
+ "2048": {1: (0.05, 5)},
+}
+
+grad_checkpoint = True
+
+# Acceleration settings
+num_workers = 8
+num_bucket_build_workers = 16
+dtype = "bf16"
+plugin = "zero2"
+
+# Model settings
+model = dict(
+ type="STDiT3-XL/2",
+ from_pretrained=None,
+ qk_norm=True,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ freeze_y_embedder=True,
+)
+vae = dict(
+ type="OpenSoraVAE_V1_2",
+ from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+ micro_frame_size=17,
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=300,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ use_timestep_transform=True,
+ sample_method="logit-normal",
+)
+
+# Mask settings
+# 25%
+mask_ratios = {
+ "random": 0.01,
+ "intepolate": 0.002,
+ "quarter_random": 0.002,
+ "quarter_head": 0.002,
+ "quarter_tail": 0.002,
+ "quarter_head_tail": 0.002,
+ "image_random": 0.0,
+ "image_head": 0.22,
+ "image_tail": 0.005,
+ "image_head_tail": 0.005,
+}
+
+# Log settings
+seed = 42
+outputs = "outputs"
+wandb = False
+epochs = 1000
+log_every = 10
+ckpt_every = 200
+
+# optimization settings
+load = None
+grad_clip = 1.0
+lr = 1e-4
+ema_decay = 0.99
+adam_eps = 1e-15
+warmup_steps = 1000
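The stage configs share lr = 1e-4 and ema_decay = 0.99, and stage1/stage3 additionally set warmup_steps = 1000. A minimal sketch of what those numbers imply, assuming a linear warmup schedule and a standard exponential-moving-average update (both shapes are assumptions of this sketch, not something the configs specify):

def warmup_lr(step: int, base_lr: float = 1e-4, warmup_steps: int = 1000) -> float:
    # Linear ramp from 0 to base_lr over warmup_steps, then constant.
    return base_lr * min(1.0, (step + 1) / warmup_steps)

def ema_update(ema_value: float, value: float, decay: float = 0.99) -> float:
    # Standard EMA: new = decay * old + (1 - decay) * current.
    return decay * ema_value + (1.0 - decay) * value

print(warmup_lr(99), warmup_lr(5000))  # 1e-05, then capped at the full 0.0001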
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..4053e1238fd0e5c4cc4acb76f4d923b0c6a21c66
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x256x256.py
@@ -0,0 +1,39 @@
+num_frames = 16
+fps = 24 // 3
+image_size = (256, 256)
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=0.5,
+ time_scale=1.0,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=4,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+)
+scheduler = dict(
+ type="iddpm",
+ num_sampling_steps=100,
+ cfg_scale=7.0,
+ cfg_channel=3, # or None
+)
+dtype = "bf16"
+
+# Condition
+prompt_path = "./assets/texts/t2v_samples.txt"
+prompt = None # prompt has higher priority than prompt_path
+
+# Others
+batch_size = 1
+seed = 42
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512-rflow.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512-rflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf2381053b0d0abf786ec97bf7ba3539def9f0a1
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512-rflow.py
@@ -0,0 +1,35 @@
+num_frames = 16
+fps = 24 // 3
+image_size = (512, 512)
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=1.0,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=2,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+)
+scheduler = dict(
+ type="rflow",
+ num_sampling_steps=10,
+ cfg_scale=7.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/t2v_samples.txt"
+save_dir = "./outputs/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..478cb5b482d21f11c42107e477ba4869ddba4b05
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/16x512x512.py
@@ -0,0 +1,35 @@
+num_frames = 16
+fps = 24 // 3
+image_size = (512, 512)
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=1.0,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=2,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+)
+scheduler = dict(
+ type="iddpm",
+ num_sampling_steps=100,
+ cfg_scale=7.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 2
+seed = 42
+prompt_path = "./assets/texts/t2v_samples.txt"
+save_dir = "./samples/samples/"
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/64x512x512.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/64x512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..03cce23de5d0c190e6f8baadc74e761cfdd39598
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/inference/64x512x512.py
@@ -0,0 +1,35 @@
+num_frames = 64
+fps = 24 // 2
+image_size = (512, 512)
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=2 / 3,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ from_pretrained="PRETRAINED_MODEL",
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=128,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+)
+scheduler = dict(
+ type="iddpm",
+ num_sampling_steps=100,
+ cfg_scale=7.0,
+)
+dtype = "bf16"
+
+# Others
+batch_size = 1
+seed = 42
+prompt_path = "./assets/texts/t2v_samples.txt"
+save_dir = "./samples/samples/"
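Each inference config derives fps from the training frame interval (24 // 3 = 8 fps for the 16-frame configs, 24 // 2 = 12 fps for the 64-frame one), so the nominal clip lengths follow directly, assuming the saved videos are written at the configured fps:

# Nominal clip duration implied by the inference configs above.
configs = {
    "16x256x256 / 16x512x512": (16, 24 // 3),  # 16 frames at 8 fps
    "64x512x512": (64, 24 // 2),               # 64 frames at 12 fps
}
for name, (num_frames, fps) in configs.items():
    print(f"{name}: {num_frames / fps:.1f} s")  # 2.0 s and ~5.3 s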
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-mask.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..19dcae6d10b62b1cba253b6441a7d9044e16fc95
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-mask.py
@@ -0,0 +1,60 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=0.5,
+ time_scale=1.0,
+ from_pretrained="PixArt-XL-2-512x512.pth",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+mask_ratios = {
+ "identity": 0.7,
+ "random": 0.15,
+ "mask_head": 0.05,
+ "mask_tail": 0.05,
+ "mask_head_tail": 0.05,
+}
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee-rflow.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee-rflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..966c9d012f971ab885352c107110e4dd9ad0c5fe
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee-rflow.py
@@ -0,0 +1,64 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=0.5,
+ time_scale=1.0,
+ # from_pretrained="PixArt-XL-2-512x512.pth",
+ # from_pretrained = "/home/zhaowangbo/wangbo/PixArt-alpha/pretrained_models/OpenSora-v1-HQ-16x512x512.pth",
+ # from_pretrained = "OpenSora-v1-HQ-16x512x512.pth",
+ from_pretrained="PRETRAINED_MODEL",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+# mask_ratios = [0.5, 0.29, 0.07, 0.07, 0.07]
+# mask_ratios = {
+# "identity": 0.9,
+# "random": 0.06,
+# "mask_head": 0.01,
+# "mask_tail": 0.01,
+# "mask_head_tail": 0.02,
+# }
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="rflow",
+ # timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = True
+
+epochs = 1
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 16
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b7278997f499523c63ff8d11def91ecab6dcbdb
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256-spee.py
@@ -0,0 +1,60 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=0.5,
+ time_scale=1.0,
+ from_pretrained="PixArt-XL-2-512x512.pth",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+mask_ratios = {
+ "identity": 0.5,
+ "random": 0.29,
+ "mask_head": 0.07,
+ "mask_tail": 0.07,
+ "mask_head_tail": 0.07,
+}
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm-speed",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7a68a8b32f3cfeef931a0b025e2ce4809027b19
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x256x256.py
@@ -0,0 +1,53 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(256, 256),
+)
+
+# Define acceleration
+num_workers = 0
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=0.5,
+ time_scale=1.0,
+ from_pretrained="PixArt-XL-2-512x512.pth",
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x512x512.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..c566fd1b7a80b90f45e48f46c1cacbd0036f0fa9
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/16x512x512.py
@@ -0,0 +1,54 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(512, 512),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=1.0,
+ from_pretrained=None,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=128,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 500
+load = None
+
+batch_size = 8
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/360x512x512.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/360x512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..62bfd1475a61e0668b46307f1d970bc9c9a8e6d0
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/360x512x512.py
@@ -0,0 +1,61 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=360,
+ frame_interval=3,
+ image_size=(512, 512),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2-seq"
+sp_size = 2
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=2 / 3,
+ from_pretrained=None,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ enable_sequence_parallelism=True, # enable sequence parallelism here
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=128,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 250
+load = None
+
+batch_size = 1
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512-sp.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512-sp.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd34a2a51fa25b64f8ff32bf87aff504af2cec42
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512-sp.py
@@ -0,0 +1,54 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=16,
+ frame_interval=3,
+ image_size=(512, 512),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 2
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=2 / 3,
+ from_pretrained=None,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+ enable_sequence_parallelism=True, # enable sequence parallelism here
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 1000
+load = None
+
+batch_size = 1
+lr = 2e-5
+grad_clip = 1.0
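With sp_size = 2 and enable_sequence_parallelism=True (plus the zero2-seq plugin in 360x512x512.py), each sample's sequence is sharded across sp_size ranks, so fewer data-parallel replicas remain. The sanity check below assumes the simplest layout, where the world is split into data-parallel groups of sp_size ranks each (an assumption about the topology, not something stated in these configs):

def parallel_layout(world_size: int, sp_size: int, batch_size_per_group: int):
    # Returns (data-parallel groups, global batch) under a plain sp-over-dp split.
    assert world_size % sp_size == 0, "world size must be divisible by sp_size"
    dp_groups = world_size // sp_size
    return dp_groups, dp_groups * batch_size_per_group

# e.g. 8 GPUs with the 64x512x512-sp.py settings (sp_size=2, batch_size=1):
print(parallel_layout(8, 2, 1))  # (4, 4)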
diff --git a/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512.py b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..e07f8c1ccf362031c26a23cb78edf400dbb8943e
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/configs/opensora/train/64x512x512.py
@@ -0,0 +1,54 @@
+# Define dataset
+dataset = dict(
+ type="VideoTextDataset",
+ data_path=None,
+ num_frames=64,
+ frame_interval=3,
+ image_size=(512, 512),
+)
+
+# Define acceleration
+num_workers = 4
+dtype = "bf16"
+grad_checkpoint = True
+plugin = "zero2"
+sp_size = 1
+
+# Define model
+model = dict(
+ type="STDiT-XL/2",
+ space_scale=1.0,
+ time_scale=2 / 3,
+ from_pretrained=None,
+ enable_flash_attn=True,
+ enable_layernorm_kernel=True,
+)
+vae = dict(
+ type="VideoAutoencoderKL",
+ from_pretrained="stabilityai/sd-vae-ft-ema",
+ micro_batch_size=64,
+)
+text_encoder = dict(
+ type="t5",
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
+ model_max_length=120,
+ shardformer=True,
+)
+scheduler = dict(
+ type="iddpm",
+ timestep_respacing="",
+)
+
+# Others
+seed = 42
+outputs = "outputs"
+wandb = False
+
+epochs = 1000
+log_every = 10
+ckpt_every = 250
+load = None
+
+batch_size = 4
+lr = 2e-5
+grad_clip = 1.0
diff --git a/exp_code/1_benchmark/Open-Sora_v12/pyproject.toml b/exp_code/1_benchmark/Open-Sora_v12/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..8cb3c761742c6bcd6f6c4d00781afbcfbba32464
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/pyproject.toml
@@ -0,0 +1,16 @@
+[tool.autoflake]
+remove-unused-variables = true
+remove-all-unused-imports = true
+ignore-init-module-imports = true
+
+[tool.isort]
+line_length = 120
+multi_line_output = 3
+include_trailing_comma = true
+ignore_comments = true
+profile = "black"
+honor_noqa = true
+
+[tool.black]
+line-length = 120
+target-version = ["py37", "py38", "py39", "py310"]
diff --git a/exp_code/1_benchmark/Open-Sora_v12/setup.py b/exp_code/1_benchmark/Open-Sora_v12/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9b1d42cfd414d037edde0fca83dd1c7a76ffeb0
--- /dev/null
+++ b/exp_code/1_benchmark/Open-Sora_v12/setup.py
@@ -0,0 +1,90 @@
+from typing import List
+
+from setuptools import find_packages, setup
+
+
+def fetch_requirements(paths) -> List[str]:
+ """
+ This function reads one or more requirements files.
+
+ Args:
+ paths (str or list[str]): path(s) to the requirements file(s).
+
+ Returns:
+ The combined list of requirement strings.
+ """
+ if not isinstance(paths, list):
+ paths = [paths]
+ requirements = []
+ for path in paths:
+ with open(path, "r") as fd:
+ requirements += [r.strip() for r in fd.readlines()]
+ return requirements
+
+
+def fetch_readme() -> str:
+ """
+ This function reads the README.md file in the current directory.
+
+ Returns:
+ The content of the README file as a single string.
+ """
+ with open("README.md", encoding="utf-8") as f:
+ return f.read()
+
+
+setup(
+ name="opensora",
+ version="1.2.0",
+ packages=find_packages(
+ exclude=(
+ "assets",
+ "cache",
+ "configs",
+ "docs",
+ "eval",
+ "evaluation_results",
+ "gradio",
+ "logs",
+ "notebooks",
+ "outputs",
+ "pretrained_models",
+ "samples",
+ "scripts",
+ "tests",
+ "tools",
+ "*.egg-info",
+ )
+ ),
+ description="Democratizing Efficient Video Production for All",
+ long_description=fetch_readme(),
+ long_description_content_type="text/markdown",
+ license="Apache Software License 2.0",
+ url="https://github.com/hpcaitech/Open-Sora",
+ project_urls={
+ "Bug Tracker": "https://github.com/hpcaitech/Open-Sora/issues",
+ "Examples": "https://hpcaitech.github.io/Open-Sora/",
+ "Documentation": "https://github.com/hpcaitech/Open-Sora?tab=readme-ov-file",
+ "Github": "https://github.com/hpcaitech/Open-Sora",
+ },
+ install_requires=fetch_requirements("requirements/requirements.txt"),
+ python_requires=">=3.6",
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: Apache Software License",
+ "Environment :: GPU :: NVIDIA CUDA",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Topic :: System :: Distributed Computing",
+ ],
+ extras_require={
+ "data": fetch_requirements("requirements/requirements-data.txt"),
+ "eval": fetch_requirements("requirements/requirements-eval.txt"),
+ "vae": fetch_requirements("requirements/requirements-vae.txt"),
+ "full": fetch_requirements(
+ [
+ "requirements/requirements-data.txt",
+ "requirements/requirements-eval.txt",
+ ]
+ ),
+ },
+)
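Since extras_require defines optional dependency groups ("data", "eval", "vae", "full"), the package and a chosen group can be installed from the repository root in editable mode with the standard pip extras syntax, e.g. pip install -e ".[data]" or pip install -e ".[full]".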