SuperCS committed
Commit 839dda3 · verified · 1 Parent(s): 6c04222

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/16x256x256.py +32 -0
  2. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x1024MS.py +34 -0
  3. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x20481B.py +36 -0
  4. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x2048MS.py +37 -0
  5. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x256x256.py +33 -0
  6. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x512x512-rflow.py +39 -0
  7. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x512x512.py +39 -0
  8. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/16x256x256.py +53 -0
  9. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x2048x2048.py +54 -0
  10. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x512x512-rflow.py +55 -0
  11. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x512x512.py +54 -0
  12. exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/64x512x512.py +55 -0
  13. exp_code/1_benchmark/Open-Sora_v12/configs/vae/inference/image.py +32 -0
  14. exp_code/1_benchmark/Open-Sora_v12/configs/vae/inference/video.py +32 -0
  15. exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage1.py +49 -0
  16. exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage2.py +49 -0
  17. exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage3.py +48 -0
  18. exp_code/1_benchmark/Open-Sora_v12/docs/acceleration.md +59 -0
  19. exp_code/1_benchmark/Open-Sora_v12/docs/commands.md +256 -0
  20. exp_code/1_benchmark/Open-Sora_v12/docs/config.md +320 -0
  21. exp_code/1_benchmark/Open-Sora_v12/docs/data_processing.md +81 -0
  22. exp_code/1_benchmark/Open-Sora_v12/docs/datasets.md +75 -0
  23. exp_code/1_benchmark/Open-Sora_v12/docs/installation.md +214 -0
  24. exp_code/1_benchmark/Open-Sora_v12/docs/report_01.md +49 -0
  25. exp_code/1_benchmark/Open-Sora_v12/docs/report_02.md +117 -0
  26. exp_code/1_benchmark/Open-Sora_v12/docs/report_03.md +160 -0
  27. exp_code/1_benchmark/Open-Sora_v12/docs/structure.md +120 -0
  28. exp_code/1_benchmark/Open-Sora_v12/docs/vae.md +66 -0
  29. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/README.md +539 -0
  30. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/READMEv1.1.md +245 -0
  31. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/acceleration.md +67 -0
  32. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/commands.md +92 -0
  33. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/datasets.md +31 -0
  34. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v1.md +49 -0
  35. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v2.md +114 -0
  36. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v3.md +159 -0
  37. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/structure.md +179 -0
  38. exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/vae.md +59 -0
  39. exp_code/1_benchmark/Open-Sora_v12/eval/README.md +114 -0
  40. exp_code/1_benchmark/Open-Sora_v12/eval/human_eval/generate.sh +23 -0
  41. exp_code/1_benchmark/Open-Sora_v12/eval/human_eval/launch.sh +26 -0
  42. exp_code/1_benchmark/Open-Sora_v12/eval/loss/eval_loss.py +163 -0
  43. exp_code/1_benchmark/Open-Sora_v12/eval/loss/launch.sh +38 -0
  44. exp_code/1_benchmark/Open-Sora_v12/eval/loss/tabulate_rl_loss.py +55 -0
  45. exp_code/1_benchmark/Open-Sora_v12/eval/sample.sh +421 -0
  46. exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_flolpips.py +89 -0
  47. exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_lpips.py +99 -0
  48. exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_psnr.py +92 -0
  49. exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_ssim.py +119 -0
  50. exp_code/1_benchmark/Open-Sora_v12/eval/vae/eval_common_metric.py +232 -0
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/16x256x256.py ADDED
@@ -0,0 +1,32 @@
+ num_frames = 16
+ fps = 8
+ image_size = (256, 256)
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=0.5,
+     time_scale=1.0,
+     from_pretrained="outputs/098-F16S3-PixArt-XL-2/epoch7-global_step30000/model_ckpt.pt",
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=20,
+     cfg_scale=7.0,
+ )
+ dtype = "bf16"
+
+ # Others
+ batch_size = 2
+ seed = 42
+ prompt_path = "./assets/texts/t2v_samples.txt"
+ save_dir = "./samples/samples/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x1024MS.py ADDED
@@ -0,0 +1,34 @@
+ num_frames = 1
+ fps = 1
+ image_size = (1920, 512)
+ multi_resolution = "PixArtMS"
+
+ # Define model
+ model = dict(
+     type="PixArtMS-XL/2",
+     space_scale=2.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PixArt-XL-2-1024-MS.pth",
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=20,
+     cfg_scale=7.0,
+ )
+ dtype = "bf16"
+
+ # Others
+ batch_size = 2
+ seed = 42
+ prompt_path = "./assets/texts/t2i_samples.txt"
+ save_dir = "./samples/samples/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x20481B.py ADDED
@@ -0,0 +1,36 @@
+ num_frames = 1
+ fps = 1
+ image_size = (2560, 1536)
+ # image_size = (2048, 2048)
+
+ model = dict(
+     type="PixArt-1B/2",
+     from_pretrained="PixArt-1B-2.pth",
+     space_scale=4,
+     no_temporal_pos_emb=True,
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+     base_size=2048 // 8,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers",
+     subfolder="vae",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=300,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=14,
+     cfg_scale=4.5,
+ )
+ dtype = "bf16"
+
+ # Others
+ batch_size = 1
+ seed = 42
+ prompt_path = "./assets/texts/t2i_sigma.txt"
+ save_dir = "./samples/samples/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x2048MS.py ADDED
@@ -0,0 +1,37 @@
+ num_frames = 1
+ fps = 1
+ # image_size = (2560, 1536)
+ # image_size = (2048, 2048)
+
+ model = dict(
+     type="PixArt-XL/2",
+     from_pretrained="PixArt-Sigma-XL-2-2K-MS.pth",
+     space_scale=4,
+     no_temporal_pos_emb=True,
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+     base_size=2048 // 8,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers",
+     subfolder="vae",
+     scaling_factor=0.13025,
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=300,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=14,
+     cfg_scale=4.5,
+ )
+ dtype = "bf16"
+
+ # Others
+ batch_size = 1
+ seed = 42
+ prompt_path = "./assets/texts/t2i_sigma.txt"
+ save_dir = "./samples/samples/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x256x256.py ADDED
@@ -0,0 +1,33 @@
+ num_frames = 1
+ fps = 1
+ image_size = (256, 256)
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PixArt-XL-2-256x256.pth",
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=20,
+     cfg_scale=7.0,
+ )
+ dtype = "bf16"
+
+ # Others
+ batch_size = 2
+ seed = 42
+ prompt_path = "./assets/texts/t2i_samples.txt"
+ save_dir = "./samples/samples/"
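For context, `docs/commands.md` added later in this commit runs this config through `scripts/inference.py` after T5 has been downloaded into `./pretrained_models`:

```bash
# 256x256 PixArt-α sampling with the config above (command taken from docs/commands.md in this commit)
torchrun --standalone --nproc_per_node 1 scripts/inference.py \
    configs/pixart/inference/1x256x256.py --ckpt-path PixArt-XL-2-256x256.pth
```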
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x512x512-rflow.py ADDED
@@ -0,0 +1,39 @@
+ num_frames = 1
+ fps = 1
+ image_size = (512, 512)
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PRETRAINED_MODEL",
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+ )
+ scheduler = dict(
+     type="rflow",
+     num_sampling_steps=20,
+     cfg_scale=7.0,
+ )
+ dtype = "bf16"
+
+ # prompt_path = "./assets/texts/t2i_samples.txt"
+ prompt = [
+     "Pirate ship trapped in a cosmic maelstrom nebula.",
+     "A small cactus with a happy face in the Sahara desert.",
+     "A small cactus with a sad face in the Sahara desert.",
+ ]
+
+ # Others
+ batch_size = 2
+ seed = 42
+ save_dir = "./outputs/samples2/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/inference/1x512x512.py ADDED
@@ -0,0 +1,39 @@
+ num_frames = 1
+ fps = 1
+ image_size = (512, 512)
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PixArt-XL-2-512x512.pth",
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+ )
+ scheduler = dict(
+     type="dpm-solver",
+     num_sampling_steps=20,
+     cfg_scale=7.0,
+ )
+ dtype = "bf16"
+
+ # prompt_path = "./assets/texts/t2i_samples.txt"
+ prompt = [
+     "Pirate ship trapped in a cosmic maelstrom nebula.",
+     "A small cactus with a happy face in the Sahara desert.",
+     "A small cactus with a sad face in the Sahara desert.",
+ ]
+
+ # Others
+ batch_size = 2
+ seed = 42
+ save_dir = "./samples/samples/"
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/16x256x256.py ADDED
@@ -0,0 +1,53 @@
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=16,
+     frame_interval=3,
+     image_size=(256, 256),
+ )
+
+ # Define acceleration
+ num_workers = 4
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+ sp_size = 1
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=0.5,
+     time_scale=1.0,
+     from_pretrained="PixArt-XL-2-512x512.pth",
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+     shardformer=True,
+ )
+ scheduler = dict(
+     type="iddpm",
+     timestep_respacing="",
+ )
+
+ # Others
+ seed = 42
+ outputs = "outputs"
+ wandb = False
+
+ epochs = 1000
+ log_every = 10
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 8
+ lr = 2e-5
+ grad_clip = 1.0
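As a usage sketch, the training entry point documented in `docs/commands.md` later in this commit is `scripts/train.py`; by analogy with the documented example (which uses an `opensora` config), pointing it at this config would look like the following, with `--data-path` filling in the `data_path=None` field above:

```bash
# Launch 16x256x256 PixArt training on 8 GPUs (pattern borrowed from docs/commands.md;
# the config path here is an assumption by analogy, not a command documented verbatim)
torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py \
    configs/pixart/train/16x256x256.py --data-path YOUR_CSV_PATH
```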
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x2048x2048.py ADDED
@@ -0,0 +1,54 @@
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path="/home/zhaowangbo/data/csv/image-v1_1_ext_noempty_rcp_clean_info.csv",
+     num_frames=1,
+     frame_interval=3,
+     image_size=(2048, 2048),
+ )
+
+ # Define acceleration
+ num_workers = 4
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+ sp_size = 1
+
+ # Define model
+ model = dict(
+     type="PixArt-1B/2",
+     space_scale=4.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PixArt-1B-2.pth",
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+ )
+
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers",
+     subfolder="vae",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=300,
+ )
+ scheduler = dict(
+     type="iddpm",
+     timestep_respacing="",
+ )
+
+ # Others
+ seed = 42
+ outputs = "outputs"
+ wandb = False
+
+ epochs = 1000
+ log_every = 10
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 4
+ lr = 2e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x512x512-rflow.py ADDED
@@ -0,0 +1,55 @@
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=1,
+     frame_interval=3,
+     image_size=(512, 512),
+ )
+
+ # Define acceleration
+ num_workers = 4
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+ sp_size = 1
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     # from_pretrained="PixArt-XL-2-512x512.pth",
+     from_pretrained="PRETRAINED_MODEL",
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+     shardformer=True,
+ )
+ scheduler = dict(
+     type="rflow",
+     # timestep_respacing="",
+ )
+
+ # Others
+ seed = 42
+ outputs = "outputs"
+ wandb = True
+
+ epochs = 2
+ log_every = 10
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 64
+ lr = 2e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/1x512x512.py ADDED
@@ -0,0 +1,54 @@
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=1,
+     frame_interval=3,
+     image_size=(512, 512),
+ )
+
+ # Define acceleration
+ num_workers = 4
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+ sp_size = 1
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=1.0,
+     no_temporal_pos_emb=True,
+     from_pretrained="PixArt-XL-2-512x512.pth",
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+     shardformer=True,
+ )
+ scheduler = dict(
+     type="iddpm",
+     timestep_respacing="",
+ )
+
+ # Others
+ seed = 42
+ outputs = "outputs"
+ wandb = False
+
+ epochs = 1000
+ log_every = 10
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 32
+ lr = 2e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/pixart/train/64x512x512.py ADDED
@@ -0,0 +1,55 @@
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=64,
+     frame_interval=3,
+     image_size=(256, 256),
+ )
+
+ # Define acceleration
+ num_workers = 4
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+ sp_size = 1
+
+
+ # Define model
+ model = dict(
+     type="PixArt-XL/2",
+     space_scale=1.0,
+     time_scale=2 / 3,
+     from_pretrained=None,
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+     micro_batch_size=128,
+ )
+ text_encoder = dict(
+     type="t5",
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=120,
+     shardformer=True,
+ )
+ scheduler = dict(
+     type="iddpm",
+     timestep_respacing="",
+ )
+
+ # Others
+ seed = 42
+ outputs = "outputs"
+ wandb = False
+
+ epochs = 1000
+ log_every = 10
+ ckpt_every = 250
+ load = None
+
+ batch_size = 4
+ lr = 2e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/vae/inference/image.py ADDED
@@ -0,0 +1,32 @@
+ image_size = (256, 256)
+ num_frames = 1
+
+ dtype = "bf16"
+ batch_size = 1
+ seed = 42
+ save_dir = "samples/vae_video"
+ cal_stats = True
+ log_stats_every = 100
+
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=num_frames,
+     image_size=image_size,
+ )
+ num_samples = 100
+ num_workers = 4
+
+ # Define model
+ model = dict(
+     type="OpenSoraVAE_V1_2",
+     from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+     micro_frame_size=None,
+     micro_batch_size=4,
+     cal_loss=True,
+ )
+
+ # loss weights
+ perceptual_loss_weight = 0.1  # used when vgg is not None and the weight is greater than 0
+ kl_loss_weight = 1e-6
exp_code/1_benchmark/Open-Sora_v12/configs/vae/inference/video.py ADDED
@@ -0,0 +1,32 @@
+ image_size = (256, 256)
+ num_frames = 51
+
+ dtype = "bf16"
+ batch_size = 1
+ seed = 42
+ save_dir = "samples/vae_video"
+ cal_stats = True
+ log_stats_every = 100
+
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=num_frames,
+     image_size=image_size,
+ )
+ num_samples = 100
+ num_workers = 4
+
+ # Define model
+ model = dict(
+     type="OpenSoraVAE_V1_2",
+     from_pretrained="hpcai-tech/OpenSora-VAE-v1.2",
+     micro_frame_size=None,
+     micro_batch_size=4,
+     cal_loss=True,
+ )
+
+ # loss weights
+ perceptual_loss_weight = 0.1  # used when vgg is not None and the weight is greater than 0
+ kl_loss_weight = 1e-6
exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage1.py ADDED
@@ -0,0 +1,49 @@
+ num_frames = 17
+ image_size = (256, 256)
+
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=num_frames,
+     frame_interval=1,
+     image_size=image_size,
+ )
+
+ # Define acceleration
+ num_workers = 16
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+
+ # Define model
+ model = dict(
+     type="OpenSoraVAE_V1_2",
+     freeze_vae_2d=True,
+     from_pretrained=None,
+     cal_loss=True,
+ )
+
+ # loss weights
+ perceptual_loss_weight = 0.1  # used when vgg is not None and the weight is greater than 0
+ kl_loss_weight = 1e-6
+
+ mixed_strategy = "mixed_video_image"
+ mixed_image_ratio = 0.2
+ use_real_rec_loss = False
+ use_z_rec_loss = True
+ use_image_identity_loss = True
+
+ # Others
+ seed = 42
+ outputs = "outputs/vae_stage1"
+ wandb = False
+
+ epochs = 100  # NOTE: adjust accordingly w.r.t. dataset size
+ log_every = 1
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 1
+ lr = 1e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage2.py ADDED
@@ -0,0 +1,49 @@
+ num_frames = 17
+ image_size = (256, 256)
+
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=num_frames,
+     frame_interval=1,
+     image_size=image_size,
+ )
+
+ # Define acceleration
+ num_workers = 16
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+
+ # Define model
+ model = dict(
+     type="OpenSoraVAE_V1_2",
+     freeze_vae_2d=False,
+     from_pretrained="outputs/vae_stage1",
+     cal_loss=True,
+ )
+
+ # loss weights
+ perceptual_loss_weight = 0.1  # used when vgg is not None and the weight is greater than 0
+ kl_loss_weight = 1e-6
+
+ mixed_strategy = "mixed_video_image"
+ mixed_image_ratio = 0.2
+ use_real_rec_loss = False
+ use_z_rec_loss = True
+ use_image_identity_loss = False
+
+ # Others
+ seed = 42
+ outputs = "outputs/vae_stage2"
+ wandb = False
+
+ epochs = 100  # NOTE: adjust accordingly w.r.t. dataset size
+ log_every = 1
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 1
+ lr = 1e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/configs/vae/train/stage3.py ADDED
@@ -0,0 +1,48 @@
+ num_frames = 33
+ image_size = (256, 256)
+
+ # Define dataset
+ dataset = dict(
+     type="VideoTextDataset",
+     data_path=None,
+     num_frames=num_frames,
+     frame_interval=1,
+     image_size=image_size,
+ )
+
+ # Define acceleration
+ num_workers = 16
+ dtype = "bf16"
+ grad_checkpoint = True
+ plugin = "zero2"
+
+ # Define model
+ model = dict(
+     type="OpenSoraVAE_V1_2",
+     freeze_vae_2d=False,
+     from_pretrained="outputs/vae_stage2",
+     cal_loss=True,
+ )
+
+ # loss weights
+ perceptual_loss_weight = 0.1  # used when vgg is not None and the weight is greater than 0
+ kl_loss_weight = 1e-6
+
+ mixed_strategy = "mixed_video_random"
+ use_real_rec_loss = True
+ use_z_rec_loss = False
+ use_image_identity_loss = False
+
+ # Others
+ seed = 42
+ outputs = "outputs/vae_stage3"
+ wandb = False
+
+ epochs = 100  # NOTE: adjust accordingly w.r.t. dataset size
+ log_every = 1
+ ckpt_every = 1000
+ load = None
+
+ batch_size = 1
+ lr = 1e-5
+ grad_clip = 1.0
exp_code/1_benchmark/Open-Sora_v12/docs/acceleration.md ADDED
@@ -0,0 +1,59 @@
+ # Acceleration
+
+ > This document corresponds to our v1.1 release.
+
+ Open-Sora aims to provide a high-speed training framework for diffusion models. We achieve a **55%** training speed acceleration when training on **64-frame 512x512 videos**. Our framework supports training **1min 1080p videos**.
+
+ ## Accelerated Transformer
+
+ Open-Sora boosts the training speed by:
+
+ - Kernel optimization, including [flash attention](https://github.com/Dao-AILab/flash-attention), a fused layernorm kernel, and the kernels compiled by ColossalAI.
+ - Hybrid parallelism, including ZeRO.
+ - Gradient checkpointing for larger batch sizes.
+
+ Our training speed on images is comparable to [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT), a project to accelerate DiT training. The training speed is measured on 8 H800 GPUs with batch size 128 and image size 256x256.
+
+ | Model | Throughput (img/s/GPU) | Throughput (tokens/s/GPU) |
+ | -------- | ---------------------- | ------------------------- |
+ | DiT | 100 | 26k |
+ | OpenDiT | 175 | 45k |
+ | OpenSora | 175 | 45k |
+
+ ## Efficient STDiT
+
+ Our STDiT adopts spatial-temporal attention to model the video data. Compared with directly applying full attention as in DiT, our STDiT is more efficient as the number of frames increases. Our current framework only supports sequence parallelism for very long sequences.
+
+ The training speed is measured on 8 H800 GPUs with the acceleration techniques applied; GC means gradient checkpointing. Both settings use T5 conditioning as in PixArt.
+
+ | Model | Setting | Throughput (sample/s/GPU) | Throughput (tokens/s/GPU) |
+ | ---------------- | -------------- | ------------------------- | ------------------------- |
+ | DiT | 16x256 (4k) | 7.20 | 29k |
+ | STDiT | 16x256 (4k) | 7.00 | 28k |
+ | DiT | 16x512 (16k) | 0.85 | 14k |
+ | STDiT | 16x512 (16k) | 1.45 | 23k |
+ | DiT (GC) | 64x512 (65k) | 0.08 | 5k |
+ | STDiT (GC) | 64x512 (65k) | 0.40 | 25k |
+ | STDiT (GC, sp=2) | 360x512 (370k) | 0.10 | 18k |
+
+ With 4x downsampling in the temporal dimension by the Video-VAE, a 24fps video has 450 frames. The gap between the speed of STDiT (28k tokens/s) and DiT on images (up to 45k tokens/s) mainly comes from the T5 and VAE encoding, and from temporal attention.
+
+ ## Accelerated Encoder (T5, VAE)
+
+ During training, texts are encoded by T5 and videos are encoded by the VAE. Typically there are two ways to accelerate the training:
+
+ 1. Preprocess text and video data in advance and save them to disk.
+ 2. Encode text and video data during training, and accelerate the encoding process.
+
+ For option 1, 120 tokens for one sample require about 1 MB of disk space, and a 64x64x64 latent requires about 4 MB. Considering a training dataset with 10M video clips, the total disk space required is 50TB. Our storage system is not ready at this time for this scale of data.
+
+ For option 2, we boost T5 speed and reduce its memory requirement. As also observed in [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT), the VAE consumes a large amount of GPU memory, so we split the batch into smaller micro-batches for VAE encoding. With both techniques, we can greatly accelerate the training speed.
+
+ The training speed is measured on 8 H800 GPUs with STDiT.
+
+ | Acceleration | Setting | Throughput (img/s/GPU) | Throughput (tokens/s/GPU) |
+ | ------------ | ------------- | ---------------------- | ------------------------- |
+ | Baseline | 16x256 (4k) | 6.16 | 25k |
+ | w. faster T5 | 16x256 (4k) | 7.00 | 29k |
+ | Baseline | 64x512 (65k) | 0.94 | 15k |
+ | w. both | 64x512 (65k) | 1.45 | 23k |
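The training configs added in this commit expose the optimizations discussed above through a handful of fields; a minimal sketch collecting them in one place (field names copied from `configs/pixart/train/64x512x512.py` and the other configs in this commit):

```python
# Acceleration-related fields as they appear in the training configs of this commit
dtype = "bf16"               # mixed-precision training
grad_checkpoint = True       # gradient checkpointing for larger batch sizes
plugin = "zero2"             # ZeRO-2 via ColossalAI
sp_size = 1                  # sequence parallel size (>1 only for very long sequences)

model = dict(
    type="PixArt-XL/2",
    enable_flash_attn=True,        # flash attention kernel
    enable_layernorm_kernel=True,  # fused layernorm kernel
)
vae = dict(
    type="VideoAutoencoderKL",
    from_pretrained="stabilityai/sd-vae-ft-ema",
    micro_batch_size=128,    # split VAE encoding into micro-batches to save memory
)
```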
exp_code/1_benchmark/Open-Sora_v12/docs/commands.md ADDED
@@ -0,0 +1,256 @@
+ # Commands
+
+ - [Config](#config)
+ - [Inference](#inference)
+   - [Inference with Open-Sora 1.2](#inference-with-open-sora-12)
+   - [Inference with Open-Sora 1.1](#inference-with-open-sora-11)
+   - [Inference with DiT pretrained on ImageNet](#inference-with-dit-pretrained-on-imagenet)
+   - [Inference with Latte pretrained on UCF101](#inference-with-latte-pretrained-on-ucf101)
+   - [Inference with PixArt-α pretrained weights](#inference-with-pixart-α-pretrained-weights)
+   - [Inference with checkpoints saved during training](#inference-with-checkpoints-saved-during-training)
+   - [Inference Hyperparameters](#inference-hyperparameters)
+ - [Training](#training)
+   - [Training Hyperparameters](#training-hyperparameters)
+ - [Search batch size for buckets](#search-batch-size-for-buckets)
+
+ ## Config
+ Note that model loading for the VAE and the diffusion model currently supports two sources:
+
+ * load from a local file path
+ * load from Hugging Face
+
+ Our configs load from the Hugging Face Hub by default.
+ If you wish to load from a local path that was downloaded from the Hub, you need to set `force_huggingface=True`, for instance:
+
+ ```python
+ # for vae
+ vae = dict(
+     type="OpenSoraVAE_V1_2",
+     from_pretrained="/root/commonData/OpenSora-VAE-v1.2",
+     micro_frame_size=17,
+     micro_batch_size=4,
+     force_huggingface=True,  # NOTE: set here
+ )
+ # for diffusion model
+ model = dict(
+     type="STDiT3-XL/2",
+     from_pretrained="/root/commonData/OpenSora-STDiT-v3",
+     qk_norm=True,
+     enable_flash_attn=True,
+     enable_layernorm_kernel=True,
+     force_huggingface=True,  # NOTE: set here
+ )
+ ```
+ However, if you want to load a self-trained model, do not set `force_huggingface=True`, since your checkpoint won't be in the Hugging Face format.
+
+ ## Inference
+
+ You can modify the corresponding config files to change the inference settings. See more details [here](/docs/structure.md#inference-config-demos).
+
+ ### Inference with Open-Sora 1.2
+
+ The inference API is compatible with Open-Sora 1.1. To ease users' experience, we add support for the `--resolution` and `--aspect-ratio` options, which are a more user-friendly way to specify the image size.
+
+ ```bash
+ python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+     --resolution 480p --aspect-ratio 9:16
+ # equivalent to
+ python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
+     --image-size 480 853
+ ```
+
+ In this version, we have merged all functions of the previous `inference-long.py` into `inference.py`. The command line arguments are the same as before (note only that frame indices and lengths are calculated on the 4x temporally compressed latents).
+
+ ### Inference with Open-Sora 1.1
+
+ Since Open-Sora 1.1 supports inference with dynamic input size, you can pass the input size as an argument.
+
+ ```bash
+ # image sampling with prompt path
+ python scripts/inference.py configs/opensora-v1-1/inference/sample.py \
+     --ckpt-path CKPT_PATH --prompt-path assets/texts/t2i_samples.txt --num-frames 1 --image-size 1024 1024
+
+ # image sampling with prompt
+ python scripts/inference.py configs/opensora-v1-1/inference/sample.py \
+     --ckpt-path CKPT_PATH --prompt "A beautiful sunset over the city" --num-frames 1 --image-size 1024 1024
+
+ # video sampling
+ python scripts/inference.py configs/opensora-v1-1/inference/sample.py \
+     --ckpt-path CKPT_PATH --prompt "A beautiful sunset over the city" --num-frames 16 --image-size 480 854
+ ```
+
+ You can adjust `--num-frames` and `--image-size` to generate different results. We recommend using the same image size as the training resolution, which is defined in [aspect.py](/opensora/datasets/aspect.py). Some examples are shown below.
+
+ - 240p
+   - 16:9 240x426
+   - 3:4 276x368
+   - 1:1 320x320
+ - 480p
+   - 16:9 480x854
+   - 3:4 554x738
+   - 1:1 640x640
+ - 720p
+   - 16:9 720x1280
+   - 3:4 832x1110
+   - 1:1 960x960
+
+ `inference-long.py` is compatible with `inference.py` and supports advanced features.
+
+ ```bash
+ # image condition
+ python scripts/inference-long.py configs/opensora-v1-1/inference/sample.py --ckpt-path CKPT_PATH \
+     --num-frames 32 --image-size 240 426 --sample-name image-cond \
+     --prompt 'A breathtaking sunrise scene.{"reference_path": "assets/images/condition/wave.png","mask_strategy": "0"}'
+
+ # video extending
+ python scripts/inference-long.py configs/opensora-v1-1/inference/sample.py --ckpt-path CKPT_PATH \
+     --num-frames 32 --image-size 240 426 --sample-name image-cond \
+     --prompt 'A car driving on the ocean.{"reference_path": "https://cdn.openai.com/tmp/s/interp/d0.mp4","mask_strategy": "0,0,0,-8,8"}'
+
+ # long video generation
+ python scripts/inference-long.py configs/opensora-v1-1/inference/sample.py --ckpt-path CKPT_PATH \
+     --num-frames 32 --image-size 240 426 --loop 16 --condition-frame-length 8 --sample-name long \
+     --prompt '|0|a white jeep equipped with a roof rack driving on a dirt road in a coniferous forest.|2|a white jeep equipped with a roof rack driving on a dirt road in the desert.|4|a white jeep equipped with a roof rack driving on a dirt road in a mountain.|6|A white jeep equipped with a roof rack driving on a dirt road in a city.|8|a white jeep equipped with a roof rack driving on a dirt road on the surface of a river.|10|a white jeep equipped with a roof rack driving on a dirt road under the lake.|12|a white jeep equipped with a roof rack flying into the sky.|14|a white jeep equipped with a roof rack driving in the universe. Earth is the background.{"reference_path": "https://cdn.openai.com/tmp/s/interp/d0.mp4", "mask_strategy": "0,0,0,0,16"}'
+
+ # video connecting
+ python scripts/inference-long.py configs/opensora-v1-1/inference/sample.py --ckpt-path CKPT_PATH \
+     --num-frames 32 --image-size 240 426 --sample-name connect \
+     --prompt 'A breathtaking sunrise scene.{"reference_path": "assets/images/condition/sunset1.png;assets/images/condition/sunset2.png","mask_strategy": "0;0,1,0,-1,1"}'
+
+ # video editing
+ python scripts/inference-long.py configs/opensora-v1-1/inference/sample.py --ckpt-path CKPT_PATH \
+     --num-frames 32 --image-size 480 853 --sample-name edit \
+     --prompt 'A cyberpunk-style city at night.{"reference_path": "https://cdn.pixabay.com/video/2021/10/12/91744-636709154_large.mp4","mask_strategy": "0,0,0,0,32,0.4"}'
+ ```
+
+ ### Inference with DiT pretrained on ImageNet
+
+ The following command automatically downloads the pretrained weights on ImageNet and runs inference.
+
+ ```bash
+ python scripts/inference.py configs/dit/inference/1x256x256-class.py --ckpt-path DiT-XL-2-256x256.pt
+ ```
+
+ ### Inference with Latte pretrained on UCF101
+
+ The following command automatically downloads the pretrained weights on UCF101 and runs inference.
+
+ ```bash
+ python scripts/inference.py configs/latte/inference/16x256x256-class.py --ckpt-path Latte-XL-2-256x256-ucf101.pt
+ ```
+
+ ### Inference with PixArt-α pretrained weights
+
+ Download T5 into `./pretrained_models` and run the following commands.
+
+ ```bash
+ # 256x256
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x256x256.py --ckpt-path PixArt-XL-2-256x256.pth
+
+ # 512x512
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x512x512.py --ckpt-path PixArt-XL-2-512x512.pth
+
+ # 1024 multi-scale
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x1024MS.py --ckpt-path PixArt-XL-2-1024MS.pth
+ ```
+
+ ### Inference with checkpoints saved during training
+
+ During training, an experiment logging folder is created in the `outputs` directory. Under each checkpoint folder, e.g. `epoch12-global_step2000`, there is an `ema.pt` file and the shared `model` folder. Run the following commands to perform inference.
+
+ ```bash
+ # inference with ema model
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000/ema.pt
+
+ # inference with model
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000
+
+ # inference with sequence parallelism
+ # sequence parallelism is enabled automatically when nproc_per_node is larger than 1
+ torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000
+ ```
+
+ The second command will automatically generate a `model_ckpt.pt` file in the checkpoint folder.
+
+ ### Inference Hyperparameters
+
+ 1. DPM-solver is good at fast inference for images. However, the video results are not satisfactory. You can use it for fast demo purposes.
+
+     ```python
+     type="dpm-solver"
+     num_sampling_steps=20
+     ```
+
+ 2. You can use [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt)'s finetuned VAE decoder on videos for inference (it consumes more memory). However, we do not see a significant improvement in the video results. To use it, download [the pretrained weights](https://huggingface.co/maxin-cn/Latte/tree/main/t2v_required_models/vae_temporal_decoder) into `./pretrained_models/vae_temporal_decoder` and modify the config file as follows.
+
+     ```python
+     vae = dict(
+         type="VideoAutoencoderKLTemporalDecoder",
+         from_pretrained="pretrained_models/vae_temporal_decoder",
+     )
+     ```
+
+ ## Training
+
+ To resume training, run the following command. ``--load`` is different from ``--ckpt-path`` in that it also loads the optimizer and dataloader states.
+
+ ```bash
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --load YOUR_PRETRAINED_CKPT
+ ```
+
+ To enable wandb logging, add `--wandb` to the command.
+
+ ```bash
+ WANDB_API_KEY=YOUR_WANDB_API_KEY torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --wandb True
+ ```
+
+ You can modify the corresponding config files to change the training settings. See more details [here](/docs/structure.md#training-config-demos).
+
+ ### Training Hyperparameters
+
+ 1. `dtype` is the data type for training. Only `fp16` and `bf16` are supported. ColossalAI automatically enables mixed precision training for `fp16` and `bf16`. During training, we find `bf16` more stable.
+
+ ## Search batch size for buckets
+
+ To search the batch size for buckets, run the following command.
+
+ ```bash
+ torchrun --standalone --nproc_per_node 1 scripts/misc/search_bs.py configs/opensora-v1-2/misc/bs.py --data-path /mnt/nfs-207/sora_data/meta/searchbs.csv
+ ```
+
+ Here, the dataset should be a small one used only for searching.
+
+ To control the batch size search range, you should specify `bucket_config` in the config file, where the value tuple is `(guess_value, range)` and the search is performed within `guess_value ± range`.
+
+ Here is an example of the bucket config:
+
+ ```python
+ bucket_config = {
+     "240p": {
+         1: (100, 100),
+         51: (24, 10),
+         102: (12, 10),
+         204: (4, 8),
+         408: (2, 8),
+     },
+     "480p": {
+         1: (50, 50),
+         51: (6, 6),
+         102: (3, 3),
+         204: (1, 2),
+     },
+ }
+ ```
+
+ You can also specify a resolution to restrict the search to that resolution, so that searches for different resolutions can run in parallel.
+
+ ```bash
+ torchrun --standalone --nproc_per_node 1 scripts/misc/search_bs.py configs/opensora-v1-2/misc/bs.py --data-path /mnt/nfs-207/sora_data/meta/searchbs.csv --resolution 240p
+ ```
+
+ The searching goal should be specified in the config file as well. There are two ways:
+
+ 1. Specify a `base_step_time` in the config file. The searching goal is to find the batch size that can achieve the `base_step_time` for each bucket.
+ 2. If `base_step_time` is not specified, it is determined by `base`, a `(batch_size, step_time)` tuple measured at the maximum batch size allowed for the bucket, as sketched below.
+
+ The script will print the best batch size (and corresponding step time) for each bucket and save the output config file. Note that we assume a larger batch size is better, so the script uses binary search to find the best batch size.
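A minimal sketch of how those two search goals might be written in the `bs.py` config, assuming only the tuple layout described above (the exact field semantics live in `scripts/misc/search_bs.py`, which is not part of this commit):

```python
# Option 1 (assumption: target step time in seconds per step)
base_step_time = 1.5

# Option 2 (assumption): omit base_step_time and give a reference measurement instead,
# a (batch_size, step_time) pair taken at the largest batch size allowed for a bucket
# base = (24, 1.5)
```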
exp_code/1_benchmark/Open-Sora_v12/docs/config.md ADDED
@@ -0,0 +1,320 @@
+ # Config Guide
+
+ - [Inference Config](#inference-config)
+ - [Advanced Inference config](#advanced-inference-config)
+ - [Inference Args](#inference-args)
+ - [Training Config](#training-config)
+ - [Training Args](#training-args)
+ - [Training Bucket Configs](#training-bucket-configs)
+
+ Our config files follow [MMEngine](https://github.com/open-mmlab/mmengine). MMEngine reads a config file (a `.py` file) and parses it into a dictionary-like object. We expose some fields in the config file as command line arguments (defined in [opensora/utils/config_utils.py](/opensora/utils/config_utils.py)). To change the inference settings, you can directly modify the corresponding config file, or pass arguments to override it.
+
+ ## Inference Config
+
+ The explanation of each field is provided below.
+
+ ```python
+ # Define sampling size
+ num_frames = 64  # number of frames, 1 means image
+ fps = 24  # frames per second (condition for generation)
+ frame_interval = 3  # output video will have fps/frame_interval frames per second
+ image_size = (240, 426)  # image size (height, width)
+
+ # Define model
+ model = dict(
+     type="STDiT2-XL/2",  # Select model type (STDiT-XL/2, DiT-XL/2, etc.)
+     from_pretrained="PRETRAINED_MODEL",  # (Optional) Load from pretrained model
+     input_sq_size=512,  # Base spatial position embedding size
+     qk_norm=True,  # Normalize query and key in attention
+     enable_flash_attn=True,  # (Optional) Speed up training and inference with flash attention
+     # Turn enable_flash_attn to False if you skip flashattn installation
+     enable_layernorm_kernel=True,  # (Optional) Speed up training and inference with fused kernel
+     # Turn enable_layernorm_kernel to False if you skip apex installation
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",  # Select VAE type
+     from_pretrained="stabilityai/sd-vae-ft-ema",  # Load from pretrained VAE
+     micro_batch_size=4,  # VAE with micro batch size to save memory
+ )
+ text_encoder = dict(
+     type="t5",  # Select text encoder type (t5, clip)
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",  # Load from pretrained text encoder
+     model_max_length=200,  # Maximum length of input text
+ )
+ scheduler = dict(
+     type="iddpm",  # Select scheduler type (iddpm, dpm-solver)
+     num_sampling_steps=100,  # Number of sampling steps
+     cfg_scale=7.0,  # hyper-parameter for classifier-free diffusion
+     cfg_channel=3,  # how many channels to use for classifier-free diffusion; if None, use all channels
+ )
+ dtype = "bf16"  # Computation type (fp16, fp32, bf16)
+
+ # Condition
+ prompt_path = "./assets/texts/t2v_samples.txt"  # path to prompt file
+ prompt = None  # prompt has higher priority than prompt_path
+
+ # Other settings
+ batch_size = 1  # batch size
+ seed = 42  # random seed
+ save_dir = "./samples"  # path to save samples
+ ```
+
+ ## Advanced Inference config
+
+ The [`inference-long.py`](/scripts/inference-long.py) script is used to generate long videos, and it also provides all functions of the [`inference.py`](/scripts/inference.py) script. The following arguments are specific to the `inference-long.py` script.
+
+ ```python
+ loop = 10
+ condition_frame_length = 4
+ reference_path = [
+     "https://cdn.openai.com/tmp/s/interp/d0.mp4",
+     None,
+     "assets/images/condition/wave.png",
+ ]
+ mask_strategy = [
+     "0,0,0,0,8,0.3",
+     None,
+     "0,0,0,0,1;0,0,0,-1,1",
+ ]
+ ```
+
+ The following figure provides an illustration of the `mask_strategy`:
+
+ ![mask_strategy](/assets/readme/report_mask_config.png)
+
+ To generate an arbitrarily long video, our strategy is to first generate a video of fixed length, and then use its last `condition_frame_length` frames to condition the next generation. This repeats for `loop` iterations, so the total length of the video is `loop * (num_frames - condition_frame_length) + condition_frame_length`. For example, with `num_frames = 16` and the `loop = 10`, `condition_frame_length = 4` shown above, the result has 10 * (16 - 4) + 4 = 124 frames.
+
+ To condition the generation on images or videos, we introduce the `mask_strategy`. It consists of 6-number tuples separated by `;`. Each tuple indicates an insertion of a condition image or video into the target generation. The meaning of each number is:
+
+ - **First number**: the loop index of the condition image or video. (0 means the first loop, 1 means the second loop, etc.)
+ - **Second number**: the index of the condition image or video in the `reference_path`.
+ - **Third number**: the start frame of the condition image or video. (0 means the first frame; images only have one frame)
+ - **Fourth number**: the location to insert. (0 means insert at the beginning, 1 means insert at the end, and -1 means insert at the end of the video)
+ - **Fifth number**: the number of frames to insert. (1 means insert one frame; images only have one frame)
+ - **Sixth number**: the edit rate of the condition image or video. (0 means no edit, 1 means full edit)
+
+ To facilitate usage, we also accept passing the reference path and mask strategy as a JSON string appended to the prompt. For example,
+
+ ```plaintext
+ 'Drone view of waves crashing against the rugged cliffs along Big Sur\'s garay point beach. The crashing blue waters create white-tipped waves, while the golden light of the setting sun illuminates the rocky shore. A small island with a lighthouse sits in the distance, and green shrubbery covers the cliff\'s edge. The steep drop from the road down to the beach is a dramatic feat, with the cliff\'s edges jutting out over the sea. This is a view that captures the raw beauty of the coast and the rugged landscape of the Pacific Coast Highway.{"reference_path": "assets/images/condition/cliff.png", "mask_strategy": "0"}'
+ ```
+
+ ## Inference Args
+
+ You can use `python scripts/inference.py --help` to see the following arguments:
+
+ - `--seed`: random seed
+ - `--ckpt-path`: path to the checkpoint (`model["from_pretrained"]`)
+ - `--batch-size`: batch size
+ - `--save-dir`: path to save samples
+ - `--sample-name`: if None, the sample will be named `sample_{index}.mp4/png`; otherwise, it will be named `{sample_name}_{index}.mp4/png`
+ - `--start-index`: start index of the sample
+ - `--end-index`: end index of the sample
+ - `--num-sample`: number of samples to generate for each prompt. The samples will be suffixed by `-0`, `-1`, `-2`, etc.
+ - `--prompt-as-path`: if True, use the prompt as the name for saving samples
+ - `--prompt-path`: path to the prompt file
+ - `--prompt`: prompt string list
+ - `--num-frames`: number of frames
+ - `--fps`: frames per second
+ - `--image-size`: image size
+ - `--num-sampling-steps`: number of sampling steps (`scheduler["num_sampling_steps"]`)
+ - `--cfg-scale`: hyper-parameter for classifier-free diffusion (`scheduler["cfg_scale"]`)
+ - `--loop`: loop for long video generation
+ - `--condition-frame-length`: condition frame length for long video generation
+ - `--reference-path`: reference path for long video generation
+ - `--mask-strategy`: mask strategy for long video generation
+
+ Example commands for inference can be found in [commands.md](/docs/commands.md).
+
+ ## Training Config
+
+ ```python
+ # Define dataset
+ dataset = dict(
+     type="VariableVideoTextDataset",  # Select dataset type
+     # VideoTextDataset for OpenSora 1.0, VariableVideoTextDataset for OpenSora 1.1 and 1.2
+     data_path=None,  # Path to the dataset
+     num_frames=None,  # Number of frames, set None since we support dynamic training
+     frame_interval=3,  # Frame interval
+     image_size=(None, None),  # Image size, set None since we support dynamic training
+     transform_name="resize_crop",  # Transform name
+ )
+ # bucket config usage see next section
+ bucket_config = {
+     "144p": {1: (1.0, 48), 16: (1.0, 17), 32: (1.0, 9), 64: (1.0, 4), 128: (1.0, 1)},
+     "256": {1: (0.8, 254), 16: (0.5, 17), 32: (0.5, 9), 64: (0.5, 4), 128: (0.5, 1)},
+     "240p": {1: (0.1, 20), 16: (0.9, 17), 32: (0.8, 9), 64: (0.8, 4), 128: (0.8, 2)},
+     "512": {1: (0.5, 86), 16: (0.2, 4), 32: (0.2, 2), 64: (0.2, 1), 128: (0.0, None)},
+     "480p": {1: (0.4, 54), 16: (0.4, 4), 32: (0.0, None)},
+     "720p": {1: (0.1, 20), 16: (0.1, 2), 32: (0.0, None)},
+     "1024": {1: (0.3, 20)},
+     "1080p": {1: (0.4, 8)},
+ }
+ # mask ratio in training
+ mask_ratios = {
+     "identity": 0.75,  # 75% no mask
+     "quarter_random": 0.025,  # 2.5% random mask with 1 frame to 1/4 #frames
+     "quarter_head": 0.025,  # 2.5% mask at the beginning with 1 frame to 1/4 #frames
+     "quarter_tail": 0.025,  # 2.5% mask at the end with 1 frame to 1/4 #frames
+     "quarter_head_tail": 0.05,  # 5% mask at the beginning and end with 1 frame to 1/4 #frames
+     "image_random": 0.025,  # 2.5% random mask with 1 image to 1/4 #images
+     "image_head": 0.025,  # 2.5% mask at the beginning with 1 image to 1/4 #images
+     "image_tail": 0.025,  # 2.5% mask at the end with 1 image to 1/4 #images
+     "image_head_tail": 0.05,  # 5% mask at the beginning and end with 1 image to 1/4 #images
+ }
+
+ # Define acceleration
+ num_workers = 8  # Number of workers for dataloader
+ num_bucket_build_workers = 16  # Number of workers for bucket building
+ dtype = "bf16"  # Computation type (fp16, fp32, bf16)
+ grad_checkpoint = True  # Use gradient checkpointing
+ plugin = "zero2"  # Plugin for training
+ sp_size = 1  # Sequence parallel size
+
+ # Define model
+ model = dict(
+     type="STDiT2-XL/2",  # Select model type (STDiT-XL/2, DiT-XL/2, etc.)
+     from_pretrained=None,  # Load from pretrained model
+     input_sq_size=512,  # Base spatial position embedding size
+     qk_norm=True,  # Normalize query and key in attention
+     enable_flash_attn=True,  # (Optional) Speed up training and inference with flash attention
+     enable_layernorm_kernel=True,  # (Optional) Speed up training and inference with fused kernel
+ )
+ vae = dict(
+     type="VideoAutoencoderKL",  # Select VAE type
+     from_pretrained="stabilityai/sd-vae-ft-ema",
+     micro_batch_size=4,  # VAE with micro batch size to save memory
+     local_files_only=True,  # Load from local files only (first time should be false)
+ )
+ text_encoder = dict(
+     type="t5",  # Select text encoder type (t5, clip)
+     from_pretrained="DeepFloyd/t5-v1_1-xxl",
+     model_max_length=200,  # Maximum length of input text
+     shardformer=True,  # Use shardformer
+     local_files_only=True,  # Load from local files only (first time should be false)
+ )
+ scheduler = dict(
+     type="iddpm",  # Select scheduler type (iddpm, iddpm-speed)
+     timestep_respacing="",
+ )
+
+ # Others
+ seed = 42  # random seed
+ outputs = "outputs"  # path to save outputs
+ wandb = False  # Use wandb or not
+
+ epochs = 1000  # Number of epochs (set a large number and kill the process when you want to stop)
+ log_every = 10
+ ckpt_every = 500
+ load = None
+
+ batch_size = None
+ lr = 2e-5
+ grad_clip = 1.0
+ ```
+
+ ## Training Args
+
+ - `--seed`: random seed
+ - `--ckpt-path`: path to the checkpoint (`model["from_pretrained"]`)
+ - `--batch-size`: batch size
+ - `--wandb`: use wandb or not
+ - `--load`: path to the checkpoint to load
+ - `--data-path`: path to the dataset (`dataset["data_path"]`)
+
+ See [commands.md](/docs/commands.md) for example commands.
+
+ ## Training Bucket Configs
+
+ We support multi-resolution/aspect-ratio/num_frames training with buckets. To enable dynamic training (for STDiT2), use the `VariableVideoTextDataset` dataset and set `bucket_config` in the config. An example is:
+
+ ```python
+ bucket_config = {
+     "240p": {16: (1.0, 16), 32: (1.0, 8), 64: (1.0, 4), 128: (1.0, 2)},
+     "256": {1: (1.0, 256)},
+     "512": {1: (1.0, 80)},
+     "480p": {1: (1.0, 52), 16: (0.5, 4), 32: (0.0, None)},
+     "720p": {16: (1.0, 2), 32: (0.0, None)},
+     "1024": {1: (1.0, 20)},
+     "1080p": {1: (1.0, 8)},
+ }
+ ```
+
+ This looks a bit difficult to understand at first glance. Let's walk through this config step by step.
+
+ ### Three-level bucket
+
+ ![bucket](/assets/readme/report_bucket.png)
+
+ We design a three-level bucket: `(resolution, num_frames, aspect_ratios)`. The resolutions and aspect ratios are predefined in [aspect.py](/opensora/datasets/aspect.py). Commonly used resolutions (e.g., 240p, 1080p) are supported, and the name represents the number of pixels (e.g., 240p nominally means 240x426, but we define 240p to represent any size whose HxW is approximately 240x426 = 102,240 pixels). The aspect ratios are defined for each resolution, so you do not need to define them in the `bucket_config`.
+
+ `num_frames` is the number of frames in each sample, with `num_frames=1` reserved for images. If `frame_interval` is not 1, a bucket with `num_frames=k` contains videos with `k*frame_interval` frames (except for images). Only a video with more than `num_frames` frames and more than `resolution` pixels is eligible for the bucket.
+
+ The two numbers defined in the bucket config are `(keep_prob, batch_size)`. Since the memory use and speed of samples from different buckets may differ, we use `batch_size` to balance the processing speed. Since our compute is limited, we cannot process every video at its original resolution as stated in OpenAI's Sora report, so we use `keep_prob` to control the number of samples in each bucket. `keep_prob` is the probability of keeping a sample in the bucket. Let's take the following config as an example:
+
+ ```python
+ bucket_config = {
+     "480p": {16: (1.0, 8),},
+     "720p": {16: (0.5, 4),},
+     "1080p": {16: (0.2, 2)},
+     "4K": {16: (0.1, 1)},
+ }
+ ```
+
+ Given a 2K video with more than 16 frames, the program first tries to put it into the "1080p" bucket, since its resolution is larger than 1080p but smaller than 4K. Because the `keep_prob` for 1080p is 20%, a random number is generated, and if it is less than 0.2 the video is put into that bucket. If not, the program tries the "720p" bucket, where the video has a 50% chance of being kept. If it is still not kept, the program puts it into the "480p" bucket directly, as that is the smallest resolution.
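A minimal, purely illustrative sketch of that cascade (not the repository's actual implementation; the function name and list layout are made up for the example):

```python
import random

def assign_bucket(candidate_buckets, rng=random.random):
    """candidate_buckets: [(name, keep_prob), ...] ordered from the largest resolution
    the video still qualifies for down to the smallest; returns the chosen bucket name."""
    for i, (name, keep_prob) in enumerate(candidate_buckets):
        if i == len(candidate_buckets) - 1:
            return name  # smallest qualifying resolution is taken directly
        if rng() < keep_prob:
            return name  # kept with probability keep_prob
        # otherwise fall through to the next (smaller) resolution
    return None

# The 2K-video example from the paragraph above:
print(assign_bucket([("1080p", 0.2), ("720p", 0.5), ("480p", 1.0)]))
```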
265
+
266
+ ### Examples
267
+
268
+ Let's see some simple examples to understand the bucket config. First, the aspect ratio bucket is compulsory, if you want to modify this you need to add your own resolution definition in [aspect.py](/opensora/datasets/aspect.py). Then, to keep only 256x256 resolution and 16 frames as OpenSora 1.0, you can use the following config:
269
+
270
+ ```python
271
+ bucket_config = {
272
+ "256": {16: (1.0, 8)},
273
+ }
274
+ ```
275
+
276
+ If you want to train a model supporting different resolutions of images, you can use the following config (example [image.py](/configs/opensora-v1-1/train/image.py)):
277
+
278
+ ```python
279
+ bucket_config = {
280
+ "256": {1: (1.0, 256)},
281
+ "512": {1: (1.0, 80)},
282
+ "480p": {1: (1.0, 52)},
283
+ "1024": {1: (1.0, 20)},
284
+ "1080p": {1: (1.0, 8)},
285
+ }
286
+ ```
287
+
288
+ If you find the number of high-resolution images too large, you can lower the `keep_prob` to reduce the number of samples in those buckets:
289
+
290
+ ```python
291
+ bucket_config = {
292
+ "256": {1: (1.0, 256)},
293
+ "512": {1: (0.8, 80)},
294
+ "480p": {1: (0.5, 52)},
295
+ "1024": {1: (0.5, 20)},
296
+ "1080p": {1: (0.2, 8)},
297
+ }
298
+ ```
299
+
300
+ And similarly for videos (example [video.py](/configs/opensora-v1-1/train/video.py)):
301
+
302
+ ```python
303
+ bucket_config = {
304
+ "240p": {16: (1.0, 16), 32: (1.0, 8), 64: (1.0, 4), 128: (1.0, 2)},
305
+ "480p": {16: (1.0, 4)},
306
+ "720p": {16: (0.5, 2)},
307
+ }
308
+ ```
309
+
310
+ Note that in the above case, all videos with 480p resolution and more than 16 frames go into the bucket `("480p", 16)`, since they all satisfy its requirements. But training long videos at 480p may be slow, so you can modify the config as follows to force videos with 32 or more frames into the 240p buckets.
311
+
312
+ ```python
313
+ bucket_config = {
314
+ "240p": {16: (1.0, 16), 32: (1.0, 8), 64: (1.0, 4), 128: (1.0, 2)},
315
+ "480p": {16: (1.0, 4), 32: (0.0, None)},
316
+ "720p": {16: (0.5, 2)},
317
+ }
318
+ ```
319
+
320
+ Combining the above examples, we hope you can now understand the bucket config given at the beginning of this section and in the config files.
exp_code/1_benchmark/Open-Sora_v12/docs/data_processing.md ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Data Processing
2
+ > Open-Sora v1.2 uses Data Processing Pipeline v1.1.
3
+
4
+ We establish a complete pipeline for video/image data processing. The pipeline is shown below.
5
+
6
+ ![pipeline](/assets/readme/report_data_pipeline.png)
7
+
8
+ First, raw videos,
9
+ either from the Internet or public datasets, are split into shorter clips based on scene detection.
10
+ Then, we evaluate these videos by predicting multiple scores using existing models. We first predict the aesthetic score
11
+ and the optical flow score for a video. We also conduct OCR to detect texts in the video. Only videos with satisfactory
12
+ evaluation results are sent to the next step for captioning. After captioning, the matching score is also calculated as
13
+ an assessment of video-text alignment. Finally, we filter samples based on the matching score and
14
+ conduct camera motion detection for the remaining samples.
15
+ In summary, our pipeline produces video-text pairs which have high aesthetic quality, large video motion and strong
16
+ semantic consistency.
17
+
18
+ Below is an example workflow to process videos.
19
+
20
+ ```bash
21
+ ROOT_VIDEO="/path/to/video/folder"
22
+ ROOT_CLIPS="/path/to/video/clips/folder"
23
+ ROOT_META="/path/to/meta/folder"
24
+
25
+ # 1.1 Create a meta file from a video folder. This should output ${ROOT_META}/meta.csv
26
+ python -m tools.datasets.convert video ${ROOT_VIDEO} --output ${ROOT_META}/meta.csv
27
+
28
+ # 1.2 Get video information and remove broken videos. This should output ${ROOT_META}/meta_info_fmin1.csv
29
+ python -m tools.datasets.datautil ${ROOT_META}/meta.csv --info --fmin 1
30
+
31
+ # 2.1 Detect scenes. This should output ${ROOT_META}/meta_info_fmin1_timestamp.csv
32
+ python -m tools.scene_cut.scene_detect ${ROOT_META}/meta_info_fmin1.csv
33
+
34
+ # 2.2 Cut video into clips based on scenes. This should produce video clips under ${ROOT_CLIPS}
35
+ python -m tools.scene_cut.cut ${ROOT_META}/meta_info_fmin1_timestamp.csv --save_dir ${ROOT_CLIPS}
36
+
37
+ # 2.3 Create a meta file for video clips. This should output ${ROOT_META}/meta_clips.csv
38
+ python -m tools.datasets.convert video ${ROOT_CLIPS} --output ${ROOT_META}/meta_clips.csv
39
+
40
+ # 2.4 Get clips information and remove broken ones. This should output ${ROOT_META}/meta_clips_info_fmin1.csv
41
+ python -m tools.datasets.datautil ${ROOT_META}/meta_clips.csv --info --fmin 1
42
+
43
+ # 3.1 Predict aesthetic scores. This should output ${ROOT_META}/meta_clips_info_fmin1_aes.csv
44
+ torchrun --nproc_per_node 8 -m tools.scoring.aesthetic.inference \
45
+ ${ROOT_META}/meta_clips_info_fmin1.csv \
46
+ --bs 1024 \
47
+ --num_workers 16
48
+
49
+ # 3.2 Filter by aesthetic scores. This should output ${ROOT_META}/meta_clips_info_fmin1_aes_aesmin5.csv
50
+ python -m tools.datasets.datautil ${ROOT_META}/meta_clips_info_fmin1_aes.csv --aesmin 5
51
+
52
+ # 4.1 Generate caption. This should output ${ROOT_META}/meta_clips_info_fmin1_aes_aesmin5_caption_part*.csv
53
+ torchrun --nproc_per_node 8 --standalone -m tools.caption.caption_llava \
54
+ ${ROOT_META}/meta_clips_info_fmin1_aes_aesmin5.csv \
55
+ --dp-size 8 \
56
+ --tp-size 1 \
57
+ --model-path /path/to/llava-v1.6-mistral-7b \
58
+ --prompt video
59
+
60
+ # 4.2 Merge caption results. This should output ${ROOT_META}/meta_clips_caption.csv
61
+ python -m tools.datasets.datautil ${ROOT_META}/meta_clips_info_fmin1_aes_aesmin5_caption_part*.csv --output ${ROOT_META}/meta_clips_caption.csv
62
+
63
+ # 4.3 Clean caption. This should output ${ROOT_META}/meta_clips_caption_cleaned.csv
64
+ python -m tools.datasets.datautil \
65
+ ${ROOT_META}/meta_clips_caption.csv \
66
+ --clean-caption \
67
+ --refine-llm-caption \
68
+ --remove-empty-caption \
69
+ --output ${ROOT_META}/meta_clips_caption_cleaned.csv
70
+
71
+ # 4.4 Optionally generate tags (e.g., objects) based on the captions. This should output your_output_prefix_{key}.csv
72
+ torchrun --nproc_per_node 8 --standalone -m tools.caption.caption_llama3 ${ROOT_META}/meta_clips_caption_cleaned.csv --key objects --output_prefix your_output_prefix
73
+
74
+ ```
75
+
76
+
77
+ For more information, please refer to:
78
+ - [Dataset Management](../tools/datasets/README.md)
79
+ - [Scene Detection and Video Splitting](../tools/scene_cut/README.md)
80
+ - [Scoring and Filtering](../tools/scoring/README.md)
81
+ - [Captioning](../tools/caption/README.md)
exp_code/1_benchmark/Open-Sora_v12/docs/datasets.md ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Datasets
2
+
3
+ For Open-Sora 1.2, we conduct mixed training with both images and videos. The main datasets we use are listed below.
4
+ Please refer to [README](/README.md#data-processing) for data processing.
5
+
6
+ ## Video
7
+
8
+ ### Webvid-10M
9
+
10
+ [Webvid-10M](https://github.com/m-bain/webvid) contains 10 million video-text pairs scraped from stock footage sites.
11
+ We first train the model on this dataset (40k hours) for 30k steps (2 epochs).
12
+
13
+ ### Panda-70M
14
+
15
+ [Panda-70M](https://github.com/snap-research/Panda-70M) is a large-scale dataset with 70M video-caption pairs.
16
+ We use the [training-10M subset](https://github.com/snap-research/Panda-70M/tree/main/dataset_dataloading) for training,
17
+ which contains ~10M videos of better quality.
18
+
19
+ ### Mixkit
20
+
21
+ [Mixkit](https://mixkit.co/) is a video website where we obtained 9k videos.
22
+
23
+ ### Pixabay
24
+
25
+ [Pixabay](https://pixabay.com/videos/) is a video website where we obtained 60.5k videos.
26
+
27
+ ### Pexels
28
+
29
+ [Pexels](https://www.pexels.com/) is a popular online platform that provides high-quality stock photos, videos, and music for free.
30
+ Most videos from this website are of high quality. Thus, we use them for both pre-training and HQ fine-tuning.
31
+ We really appreciate the great platform and the contributors!
32
+
33
+ ### Inter4K
34
+
35
+ [Inter4K](https://github.com/alexandrosstergiou/Inter4K) is a dataset containing 1K video clips with 4K resolution.
36
+ The dataset is proposed for super-resolution tasks. We use the dataset for HQ fine-tuning.
37
+
38
+ ### HD-VG-130M
39
+
40
+ [HD-VG-130M](https://github.com/daooshee/HD-VG-130M?tab=readme-ov-file) comprises 130M text-video pairs.
41
+ The caption is generated by BLIP-2.
42
+ We find the scene quality and caption quality relatively poor. For Open-Sora 1.0, we only used ~350K samples from this dataset.
43
+
44
+ ### MiraData
45
+
46
+ [MiraData](https://github.com/mira-space/MiraData): a high-quality dataset with 77k long videos, mainly from games and city/scenic exploration.
47
+
48
+
49
+ ### Vript
50
+
51
+ [Vript](https://github.com/mutonix/Vript/tree/main): a densely annotated dataset of 400k videos.
52
+
53
+
54
+ ## Image
55
+
56
+ ### Midjourney-v5-1.7M
57
+
58
+ [Midjourney-v5-1.7M](https://huggingface.co/datasets/wanng/midjourney-v5-202304-clean) includes 1.7M image-text pairs.
59
+ In detail, this dataset introduces two subsets: original and upscale.
60
+ This dataset is proposed for exploring the relationship of prompts and high-quality images.
61
+
62
+ ### Midjourney-kaggle-clean
63
+
64
+ [Midjourney-kaggle-clean](https://huggingface.co/datasets/wanng/midjourney-kaggle-clean) is a reconstructed version of [Midjourney User Prompts & Generated Images (250k)](https://www.kaggle.com/datasets/succinctlyai/midjourney-texttoimage?select=general-01_2022_06_20.json%5D), which is cleaned by rules.
65
+ Moreover, this dataset is divided into two subsets: original and upscale.
66
+ This dataset is proposed for enabling research on text-to-image model prompting.
67
+
68
+ ### Unsplash-lite
69
+
70
+ The [Unsplash-lite](https://github.com/unsplash/datasets) Dataset comprises 25k nature-themed Unsplash photos, 25k keywords, and 1M searches.
71
+ This dataset covers a vast range of uses and contexts. Its extensive scope in intent and semantics opens new avenues for research and learning.
72
+
73
+ ### LAION-AESTHETICS 6.5+
74
+
75
+ LAION aesthetic 6.5+ dataset is a subset of the LAION dataset, which contains 625K high-quality images with aesthetic scores > 6.5. However, as LAION is currently not publicly available, we use this 168k [subset](https://huggingface.co/datasets/bhargavsdesai/laion_improved_aesthetics_6.5plus_with_images).
exp_code/1_benchmark/Open-Sora_v12/docs/installation.md ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Installation
2
+
3
+ Requirements are listed in `requirements` folder.
4
+ Note that besides these packages, some packages need to be manually installed; they are detailed in the following sections.
5
+
6
+ ## Training & Inference
7
+
8
+ You need to install `opensora` for training and inference. You can follow the steps below for installation. We also provide guidelines for different CUDA versions for compatibility.
9
+
10
+ Please note that the default installation is for training and inference only. Other optional dependencies are detailed in the sections [Data Processing](#data-processing), [Evaluation](#evaluation), and [VAE](#vae) respectively.
11
+
12
+ ### Step 1: Install PyTorch and xformers
13
+
14
+ First of all, make sure you have the latest build toolkit for Python.
15
+
16
+ ```bash
17
+ # update build libs
18
+ pip install -U pip setuptools wheel
19
+ ```
20
+
21
+ If you are using **CUDA 12.1**, you can execute the command below to directly install PyTorch, torchvision and xformers.
22
+
23
+ ```bash
24
+ # install pytorch, torchvision, and xformers
25
+ pip install -r requirements/requirements-cu121.txt
26
+ ```
27
+
28
+ If you are using different CUDA versions, you need to manually install `torch`, `torchvision` and `xformers`. You can find the compatible distributions according to the links below.
29
+
30
+ - PyTorch: choose install commands from [PyTorch installation page](https://pytorch.org/get-started/locally/) based on your own CUDA version.
31
+ - xformers: choose install commands from [xformers repo](https://github.com/facebookresearch/xformers?tab=readme-ov-file#installing-xformers) based on your own CUDA version.
32
+
33
+ ### Step 2: Install Open-Sora
34
+
35
+ Then, you can install the project for training and inference with the following commands:
36
+
37
+ ```bash
38
+ # install this project
39
+ git clone https://github.com/hpcaitech/Open-Sora
40
+ cd Open-Sora
41
+
42
+ # the default installation is for inference only
43
+ pip install -v . # NOTE: for development mode, run `pip install -v -e .`
44
+ ```
45
+
46
+ ### Step 3: Install Acceleration Tools (Optional)
47
+
48
+ This is optional but recommended for faster speed, especially for training. To enable `layernorm_kernel` and `flash_attn`, you need to install `apex` and `flash-attn` with the following commands.
49
+
50
+ ```bash
51
+ # install flash attention
52
+ # set enable_flash_attn=False in config to disable flash attention
53
+ pip install packaging ninja
54
+ pip install flash-attn --no-build-isolation
55
+
56
+ # install apex, the compilation will take a long time
57
+ # set enable_layernorm_kernel=False in config to disable apex
58
+ pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git
59
+ ```
60
+
61
+ ## Data Processing
62
+
63
+ ### Step 1: Install Requirements
64
+
65
+ First, run the following command to install requirements:
66
+
67
+ ```bash
68
+ pip install -v .[data]
69
+ # For development: `pip install -v -e .[data]`
70
+ ```
71
+
72
+ Next, you need to manually install the packages listed in the following sections specific to your data processing needs.
73
+
74
+ ### Step 2: Install OpenCV
75
+
76
+ To get image and video information, we use [opencv-python](https://github.com/opencv/opencv-python). You can install it with pip:
77
+
78
+ ```bash
79
+ pip install opencv-python
80
+ ```
81
+
82
+ However, if your videos are in av1 codec instead of h264, you need to install ffmpeg (already in our [requirement script](../requirements/requirements-data.txt)), then run the following to make conda support av1 codec:
83
+
84
+ ```bash
85
+ pip uninstall opencv-python
86
+ conda install -c conda-forge opencv
87
+ ```
88
+
89
+ ### Step 3: Install Task-specific Dependencies
90
+
91
+ We have a variety of data processing pipelines, each requiring its own dependencies. You can refer to the sections below and install dependencies according to your own needs.
92
+
93
+ #### LLaVA Captioning
94
+
95
+ You need to manually install LLaVA with the following command:
96
+
97
+ ```bash
98
+ pip install --no-deps llava@git+https://github.com/haotian-liu/LLaVA.git@v1.2.2.post1
99
+ ```
100
+
101
+ #### PLLaVA Captioning
102
+
103
+ You need to manually install PLLaVa with the following commands:
104
+
105
+ ```bash
106
+ cd tools/caption/pllava_dir # Assume you are in Open-Sora-dev root directory
107
+ git clone https://github.com/magic-research/PLLaVA.git
108
+ cd PLLaVA
109
+ git checkout fd9194a # since there is no version tag, we use this commit
110
+ python python_scripts/hf.py # download the PLLaVA weights
111
+
112
+ # IMPORTANT: create new environment for reliable pllava performances:
113
+ conda create -n pllava python=3.10
114
+ # You need to manually install `torch`, `torchvision` and `xformers` for different CUDA versions, the following works for CUDA 12.1:
115
+ conda activate pllava
116
+ pip install -r ../../../requirements/requirements-cu121.txt
117
+ pip install packaging ninja
118
+ pip install flash-attn --no-build-isolation
119
+ # You may manually remove any lines in requirements.txt that contains `cu11`, then run `pip install -r requirements.txt`
120
+ # Alternatively, use our prepared pllava environment:
121
+ pip install -r ../../../../requirements/requirements-pllava.txt
122
+ ```
123
+
124
+ #### Scene Detection
125
+
126
+ We use [`PySceneDetect`](https://github.com/Breakthrough/PySceneDetect) for this job. You need to manually run the following:
127
+
128
+ ```bash
129
+ pip install scenedetect[opencv] --upgrade
130
+ ```
131
+
132
+ #### OCR
133
+
134
+ You need to go into `path_to_your_env/lib/python3.10/site-packages/mmdet/__init__.py`
135
+ and change the assert of `mmcv_version < digit_version(mmcv_maximum_version)` to `mmcv_version <= digit_version(mmcv_maximum_version)`.
136
+
137
+ If you are unsure of the path to the mmdet init file, simply run our [OCR command](../tools/scoring/README.md) and wait for the mmdet assertion error on mmcv versions.
138
+ The error will contain the exact path to the mmdet init file.
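+ Alternatively, you can locate the file directly from Python (a quick convenience check, assuming `mmdet` is installed in the active environment):
+
+ ```python
+ import mmdet
+
+ # prints the full path of mmdet/__init__.py, where the mmcv version assert lives
+ print(mmdet.__file__)
+ ```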
139
+
140
+
141
+ ## Evaluation
142
+
143
+ ### Step 1: Install Requirements
144
+
145
+ To conduct evaluation, run the following command to install requirements:
146
+
147
+ ```bash
148
+ pip install -v .[eval]
149
+ # For development:`pip install -v -e .[eval]`
150
+ ```
151
+
152
+ ### Step 2: Install VBench
153
+
154
+ <!-- You need to manually install [VBench](https://github.com/Vchitect/VBench):
155
+
156
+ ```bash
157
+ pip install --no-deps vbench==0.1.1
158
+ # If the installation shows a warning about the installed vbench not being in PATH, you need to add it by:
159
+ export PATH="/path/to/vbench:$PATH"
160
+ ``` -->
161
+
162
+ You need to install VBench manually:
163
+ ```bash
164
+ # first clone their repo
165
+ cd .. # assume you are in the Open-Sora root folder, you may install at other location but make sure the soft link paths later are correct
166
+ git clone https://github.com/Vchitect/VBench.git
167
+ cd VBench
168
+ git checkout v0.1.2
169
+
170
+ # next, fix their hard-coded path issue
171
+ vim vbench2_beta_i2v/utils.py
172
+ # find `image_root` in the `load_i2v_dimension_info` function, change it to point to your appropriate image folder
173
+
174
+ # last, create softlinks
175
+ cd ../Open-Sora # or `cd ../Open-Sora-dev` for development
176
+ ln -s ../VBench/vbench vbench # you may need to change ../VBench/vbench to your corresponding path
177
+ ln -s ../VBench/vbench2_beta_i2v vbench2_beta_i2v # you may need to change ../VBench/vbench_beta_i2v to your corresponding path
178
+ # later you need to make sure to run evaluation from your Open-Sora folder, else vbench, vbench2_beta_i2v cannot be found
179
+ ```
180
+
181
+
182
+ ### Step 3: Install `cupy` for Potential VAE Errors
183
+
184
+ You need to manually install [cupy](https://docs.cupy.dev/en/stable/install.html).
185
+
186
+ - For CUDA v11.2~11.8 (x86_64 / aarch64), `pip install cupy-cuda11x`
187
+ - For CUDA v12.x (x86_64 / aarch64), `pip install cupy-cuda12x`
188
+
189
+ Note that for VAE evaluation, you may run into an error with `ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'`. In this case, go to the corresponding file (`.../pytorchvideo/transforms/augmentations.py`) reporting this error and change it as follows:
190
+
191
+ ```python
192
+ # find the original line:
193
+ import torchvision.transforms.functional_tensor as F_t
194
+ # change to:
195
+ import torchvision.transforms._functional_tensor as F_t
196
+ ```
197
+
198
+
199
+
200
+
201
+ ## VAE
202
+
203
+ ### Step 1: Install Requirements
204
+
205
+ To train and evaluate your own VAE, run the following command to install requirements:
206
+
207
+ ```bash
208
+ pip install -v .[vae]
209
+ # For development:`pip install -v -e .[vae]`
210
+ ```
211
+
212
+ ### Step 2: VAE Evaluation (`cupy` and Potential VAE Errors)
213
+
214
+ Refer to the [Evaluation's VAE section](#step-3-install-cupy-for-potential-vae-errors) above.
exp_code/1_benchmark/Open-Sora_v12/docs/report_01.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora 1.0 Report
2
+
3
+ OpenAI's Sora is amazing at generating one-minute high-quality videos. However, it reveals almost no information about its implementation details. To make AI more "open", we are dedicated to building an open-source version of Sora. This report describes our first attempt to train a transformer-based video diffusion model.
4
+
5
+ ## Efficiency in choosing the architecture
6
+
7
+ To lower the computational cost, we want to utilize existing VAE models. Sora uses a spatial-temporal VAE to reduce the temporal dimension. However, we found no open-source, high-quality spatial-temporal VAE model: [MAGVIT](https://github.com/google-research/magvit)'s 4x4x4 VAE is not open-sourced, while [VideoGPT](https://wilson1yan.github.io/videogpt/index.html)'s 2x4x4 VAE showed low quality in our experiments. Thus, we decided to use a 2D VAE (from [Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original)) in our first version.
8
+
9
+ Video training involves a large number of tokens. Considering 24 fps 1-minute videos, we have 1440 frames. With 4x VAE downsampling and 2x patch-size downsampling, we have 1440x1024≈1.5M tokens. Full attention on 1.5M tokens leads to a huge computational cost. Thus, we use spatial-temporal attention to reduce the cost, following [Latte](https://github.com/Vchitect/Latte).
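+ As a quick sanity check of this arithmetic (assuming 256x256 frames, the resolution used in our pretraining):
+
+ ```python
+ frames = 24 * 60                          # 24 fps x 60 s = 1440 frames
+ tokens_per_frame = (256 // 4 // 2) ** 2   # 4x VAE + 2x patch downsampling -> 32 x 32 = 1024 tokens
+ print(frames * tokens_per_frame)          # 1474560, i.e. ~1.5M tokens
+ ```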
10
+
11
+ As shown in the figure, we insert a temporal attention block right after each spatial attention block in STDiT (ST stands for spatial-temporal). This is similar to variant 3 in Latte's paper. However, we do not control for a similar number of parameters across these variants. While Latte's paper claims their variant is better than variant 3, our experiments on 16x256x256 videos show that, with the same number of iterations, the performance ranks as: DiT (full) > STDiT (Sequential) > STDiT (Parallel) ≈ Latte. Thus, we choose STDiT (Sequential) for efficiency. A speed benchmark is provided [here](/docs/acceleration.md#efficient-stdit).
12
+
13
+ ![Architecture Comparison](/assets/readme/report_arch_comp.png)
14
+
15
+ To focus on video generation, we hope to build the model on top of a powerful image generation model. [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha) is an efficiently trained, high-quality image generation model with a T5-conditioned DiT structure. We initialize our model with PixArt-α and initialize the projection layer of each inserted temporal attention with zeros. This initialization preserves the model's image generation ability at the beginning, while Latte's architecture cannot. The inserted attention increases the number of parameters from 580M to 724M.
16
+
17
+ ![Architecture](/assets/readme/report_arch.jpg)
18
+
19
+ Drawing from the success of PixArt-α and Stable Video Diffusion, we also adopt a progressive training strategy: 16x256x256 on a 366K-clip pretraining dataset, then 16x256x256, 16x512x512, and 64x512x512 on a 20K-clip dataset. With scaled position embeddings, this strategy greatly reduces the computational cost.
20
+
21
+ We also tried a 3D patch embedder in DiT. However, with 2x downsampling in the temporal dimension, the generated videos have low quality. Thus, we leave temporal downsampling to a temporal VAE in our next version. For now, we sample every 3 frames for 16-frame training and every 2 frames for 64-frame training.
22
+
23
+ ## Data is the key to high quality
24
+
25
+ We find that the quantity and quality of data have a great impact on the quality of generated videos, even more than the model architecture and training strategy. At this time, we only prepared the first split (366K video clips) from [HD-VG-130M](https://github.com/daooshee/HD-VG-130M). The quality of these videos varies greatly, and the captions are not very accurate. Thus, we further collected 20K relatively high-quality videos from [Pexels](https://www.pexels.com/), which provides free-license videos. We caption the videos with LLaVA, an image captioning model, using three frames and a designed prompt. With the designed prompt, LLaVA can generate captions of good quality.
26
+
27
+ ![Caption](/assets/readme/report_caption.png)
28
+
29
+ As we place more emphasis on data quality, we plan to collect more data and build a video preprocessing pipeline in our next version.
30
+
31
+ ## Training Details
32
+
33
+ With a limited training budget, we made only a few explorations. We found a learning rate of 1e-4 too large and scaled it down to 2e-5. When training with a large batch size, we found `fp16` less stable than `bf16`, sometimes leading to generation failure. Thus, we switched to `bf16` for training on 64x512x512. For other hyper-parameters, we follow previous works.
34
+
35
+ ## Loss curves
36
+
37
+ 16x256x256 Pretraining Loss Curve
38
+
39
+ ![16x256x256 Pretraining Loss Curve](/assets/readme/report_loss_curve_1.png)
40
+
41
+ 16x256x256 HQ Training Loss Curve
42
+
43
+ ![16x256x256 HQ Training Loss Curve](/assets/readme/report_loss_curve_2.png)
44
+
45
+ 16x512x512 HQ Training Loss Curve
46
+
47
+ ![16x512x512 HQ Training Loss Curve](/assets/readme/report_loss_curve_3.png)
48
+
49
+ > Core Contributor: Zangwei Zheng*, Xiangyu Peng*, Shenggui Li, Hongxing Liu, Yang You
exp_code/1_benchmark/Open-Sora_v12/docs/report_02.md ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora 1.1 Report
2
+
3
+ - [Model Architecture Modification](#model-architecture-modification)
4
+ - [Support for Multi-time/resolution/aspect ratio/fps Training](#support-for-multi-timeresolutionaspect-ratiofps-training)
5
+ - [Masked DiT as Image/Video-to-Video Model](#masked-dit-as-imagevideo-to-video-model)
6
+ - [Data Collection \& Pipeline](#data-collection--pipeline)
7
+ - [Training Details](#training-details)
8
+ - [Limitation and Future Work](#limitation-and-future-work)
9
+
10
+ In the Open-Sora 1.1 release, we train a 700M model on 10M data (Open-Sora 1.0 was trained on 400K data) with a better STDiT architecture. We implement the following features mentioned in [Sora's report](https://openai.com/research/video-generation-models-as-world-simulators):
11
+
12
+ - Variable durations, resolutions, aspect ratios (Sampling flexibility, Improved framing and composition)
13
+ - Prompting with images and videos (Animating images, Extending generated videos, Video-to-video editing, Connecting videos)
14
+ - Image generation capabilities
15
+
16
+ To achieve this goal, we use multi-task learning in the pretraining stage. For diffusion models, training with different sampled timesteps is already a form of multi-task learning. We further extend this idea to multiple resolutions, aspect ratios, frame lengths, fps values, and different mask strategies for image- and video-conditioned generation. We train the model on videos of **0s~15s, 144p to 720p, and various aspect ratios**. Although the temporal consistency is not that high due to limited training FLOPs, we can still see the potential of the model.
17
+
18
+ ## Model Architecture Modification
19
+
20
+ We made the following modifications to the original ST-DiT for better training stability and performance (ST-DiT-2):
21
+
22
+ - **[Rope embedding](https://arxiv.org/abs/2104.09864) for temporal attention**: Following LLMs' best practices, we change the sinusoidal positional encoding to RoPE embedding for temporal attention, since it is also a sequence prediction task.
23
+ - **AdaIN and Layernorm for temporal attention**: we wrap the temporal attention with AdaIN and layer norm, as done for the spatial attention, to stabilize training.
24
+ - **[QK-normalization](https://arxiv.org/abs/2302.05442) with [RMSNorm](https://arxiv.org/abs/1910.07467)**: Following [SD3](https://arxiv.org/pdf/2403.03206.pdf), we apply QK-normalization to all attention layers for better training stability in half precision.
25
+ - **Dynamic input size support and video information conditioning**: To support multi-resolution, aspect ratio, and fps training, we make ST-DiT-2 accept any input size and automatically scale positional embeddings. Extending [PixArt-alpha](https://github.com/PixArt-alpha/PixArt-alpha)'s idea, we condition on the video's height, width, aspect ratio, frame length, and fps.
26
+ - **Extending T5 tokens from 120 to 200**: our captions are usually fewer than 200 tokens, and we find the model can handle longer text well.
27
+
28
+ ## Support for Multi-time/resolution/aspect ratio/fps Training
29
+
30
+ As mentioned in [Sora's report](https://openai.com/research/video-generation-models-as-world-simulators), training with videos at their original resolution, aspect ratio, and length increases sampling flexibility and improves framing and composition. We found three ways to achieve this goal:
31
+
32
+ - [NaViT](https://arxiv.org/abs/2307.06304): support dynamic size within the same batch by masking, with little efficiency loss. However, the system is a bit complex to implement, and may not benefit from optimized kernels such as flash attention.
33
+ - Padding ([FiT](https://arxiv.org/abs/2402.12376), [Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan)): support dynamic size within the same batch by padding. However, padding different resolutions to the same size is not efficient.
34
+ - Bucket ([SDXL](https://arxiv.org/abs/2307.01952), [PixArt](https://arxiv.org/abs/2310.00426)): support dynamic sizes across batches by bucketing, but the size must be the same within a batch, and only a fixed number of sizes can be used. With the same size in a batch, we do not need to implement complex masking or padding.
35
+
36
+ For simplicity of implementation, we choose the bucket method. We pre-define some fixed resolutions and allocate different samples to different buckets. The concerns about bucketing are listed below, but as we can see, they are not a big issue in our case.
37
+
38
+ <details>
39
+ <summary>View the concerns</summary>
40
+
41
+ - The bucket size is limited to a fixed number: First, in real-world applications, only a few aspect ratios (9:16, 3:4) and resolutions (240p, 1080p) are commonly used. Second, we find trained models can generalize well to unseen resolutions.
42
+ - The size within each batch is the same, which breaks the i.i.d. assumption: Since we are using multiple GPUs, the local batches on different GPUs have different sizes. We did not see a significant performance drop due to this issue.
43
+ - There may not be enough samples to fill each bucket, and the distribution may be biased: First, our dataset is large enough to fill each bucket when the local batch size is not too large. Second, we should analyze the data's size distribution and define the bucket sizes accordingly. Third, an unbalanced distribution did not affect the training process significantly.
44
+ - Different resolutions and frame lengths may have different processing speed: Different from PixArt, which only deals with aspect ratios of similar resolutions (similar token numbers), we need to consider the processing speed of different resolutions and frame lengths. We can use the `bucket_config` to define the batch size for each bucket to ensure the processing speed is similar.
45
+
46
+ </details>
47
+
48
+ ![bucket](/assets/readme/report_bucket.png)
49
+
50
+ As shown in the figure, a bucket is a triplet of `(resolution, num_frame, aspect_ratio)`. We provide pre-defined aspect ratios for each resolution that cover most common video aspect ratios. Before each epoch, we shuffle the dataset and allocate the samples to different buckets as shown in the figure. We put a sample into the bucket with the largest resolution and frame length that is still smaller than the video's.
51
+
52
+ Since our computational resources are limited, we further introduce two attributes, `keep_prob` and `batch_size`, for each `(resolution, num_frame)` pair to reduce the computational cost and enable multi-stage training. Specifically, a high-resolution video is downsampled to a lower resolution with probability `1-keep_prob`, and each bucket is processed with its own `batch_size`. In this way, we can control the number of samples in different buckets and balance the GPU load by searching for a good batch size for each bucket.
53
+
54
+ A detailed explanation of the bucket usage in training is available in [docs/config.md](/docs/config.md#training-bucket-configs).
55
+
56
+ ## Masked DiT as Image/Video-to-Video Model
57
+
58
+ Transformers can be easily extended to support image-to-image and video-to-video tasks. We propose a mask strategy to support image and video conditioning. The mask strategy is shown in the figure below.
59
+
60
+ ![mask strategy](/assets/readme/report_mask.png)
61
+
62
+ Typically, we unmask the frames to be conditioned on for image/video-to-video conditioning. During the ST-DiT forward pass, unmasked frames get timestep 0, while the others keep the sampled timestep t. We find that directly applying this strategy to a trained model yields poor results, since the diffusion model never learned to handle different timesteps within one sample during training.
63
+
64
+ Inspired by [UL2](https://arxiv.org/abs/2205.05131), we introduce a random masking strategy during training. Specifically, we randomly unmask frames during training, including unmasking the first frame, the first k frames, the last frame, the last k frames, both the first and last k frames, random frames, etc. Based on Open-Sora 1.0, with a 50% probability of applying masking, the model learns to handle image conditioning within 10k steps (while 30% yields a weaker ability), with a small text-to-video performance drop. Thus, for Open-Sora 1.1, we pretrain the model from scratch with the masking strategy.
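+ A minimal sketch of such a random unmasking scheme is given below; the patterns and probabilities here are illustrative, not the exact ones used in training.
+
+ ```python
+ import random
+
+ def random_unmask(num_frames, mask_prob=0.5):
+     """Return one 0/1 flag per frame: 1 = unmasked (conditioned on, timestep 0), 0 = masked (noised)."""
+     mask = [0] * num_frames
+     if random.random() > mask_prob:
+         return mask  # no conditioning: plain text-to-video training
+     k = random.randint(1, max(1, num_frames // 2))
+     pattern = random.choice(["first", "first_k", "last", "last_k", "both_ends", "random"])
+     if pattern == "first":
+         mask[0] = 1
+     elif pattern == "first_k":
+         mask[:k] = [1] * k
+     elif pattern == "last":
+         mask[-1] = 1
+     elif pattern == "last_k":
+         mask[-k:] = [1] * k
+     elif pattern == "both_ends":
+         mask[:k] = [1] * k
+         mask[-k:] = [1] * k
+     else:  # "random"
+         for i in random.sample(range(num_frames), k):
+             mask[i] = 1
+     return mask
+ ```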
65
+
66
+ An illustration of the mask strategy config used at inference is given below. A five-number tuple provides great flexibility in defining the mask strategy. By conditioning on generated frames, we can autoregressively generate an arbitrary number of frames (although errors propagate).
67
+
68
+ ![mask strategy config](/assets/readme/report_mask_config.png)
69
+
70
+ A detailed explanation of the mask strategy usage is available in [docs/config.md](/docs/config.md#advanced-inference-config).
71
+
72
+ ## Data Collection & Pipeline
73
+
74
+ As we found in Open-Sora 1.0, the quantity and quality of data are crucial for training a good model, so we work hard on scaling the dataset. First, we create an automatic pipeline following [SVD](https://arxiv.org/abs/2311.15127), including scene cutting, captioning, various scoring and filtering, and dataset management scripts and conventions. More information can be found in [docs/data_processing.md](/docs/data_processing.md).
75
+
76
+ ![pipeline](/assets/readme/report_data_pipeline.png)
77
+
78
+ We planned to use [Panda-70M](https://snap-research.github.io/Panda-70M/) and other data to train the model, approximately 30M+ samples in total. However, we found disk IO to be a bottleneck for training and data processing at the same time. Thus, we could only prepare a 10M dataset, which did not go through the full processing pipeline we built. Finally, we use a dataset with 9.7M videos + 2.6M images for pre-training, and 560K videos + 1.6M images for fine-tuning. The pretraining dataset statistics are shown below. More information about the dataset can be found in [docs/datasets.md](/docs/datasets.md).
79
+
80
+ Image text tokens (by T5 tokenizer):
81
+
82
+ ![image text tokens](/assets/readme/report_image_textlen.png)
83
+
84
+ Video text tokens (by T5 tokenizer). We directly use Panda-70M's short captions for training and caption the other datasets ourselves. The generated captions are usually fewer than 200 tokens.
85
+
86
+ ![video text tokens](/assets/readme/report_video_textlen.png)
87
+
88
+ Video duration:
89
+
90
+ ![video duration](/assets/readme/report_video_duration.png)
91
+
92
+ ## Training Details
93
+
94
+ With limited computational resources, we had to carefully monitor the training process and change the training strategy whenever we suspected the model was not learning well, since there was no compute budget for ablation studies. Thus, Open-Sora 1.1's training includes multiple changes, and as a result, EMA is not applied.
95
+
96
+ 1. First, we fine-tune for **6k** steps with images of different resolutions from the `Pixart-alpha-1024` checkpoint. We find the model easily adapts to generating images at different resolutions. We use [SpeeDiT](https://github.com/1zeryu/SpeeDiT) (iddpm-speed) to accelerate the diffusion training.
97
+ 2. **[Stage 1]** Then, we pretrain the model with gradient checkpointing for **24k** steps, which takes **4 days** on 64 H800 GPUs. Although the number of samples seen by the model is the same, we find the model learns more slowly than with a smaller batch size. We speculate that at an early stage, the number of steps is more important for training. Most videos are in **240p** resolution, and the config is similar to [stage2.py](/configs/opensora-v1-1/train/stage2.py). The videos look good, but the model does not yet have much temporal knowledge. We use a mask ratio of 10%.
98
+ 3. **[Stage 1]** To increase the number of steps, we switch to a smaller batch size without gradient checkpointing. We also add fps conditioning at this point. We trained **40k** steps over **2 days**. Most videos are in **144p** resolution, and the config file is [stage1.py](/configs/opensora-v1-1/train/stage1.py). We use a lower resolution because we found in Open-Sora 1.0 that the model can learn temporal knowledge at relatively low resolutions.
99
+ 4. **[Stage 1]** We find the model cannot learn long videos well and produces noisy generations, which we speculate is the half-precision problem found in Open-Sora 1.0 training. Thus, we adopt QK-normalization to stabilize the training. Similar to SD3, we find the model quickly adapts to QK-normalization. We also switch from iddpm-speed to iddpm and increase the mask ratio to 25%, as we find image conditioning not learning well. We trained for **17k** steps over **14 hours**. Most videos are in **144p** resolution, and the config file is [stage1.py](/configs/opensora-v1-1/train/stage1.py). The stage 1 training lasts approximately one week, for a total of **81k** steps.
100
+ 5. **[Stage 2]** We switch to a higher resolution, where most videos are in **240p and 480p** resolution ([stage2.py](/configs/opensora-v1-1/train/stage2.py)). We trained **22k** steps for **one day** on all pre-training data.
101
+ 6. **[Stage 3]** We switch to a higher resolution, where most videos are in **480p and 720p** resolution ([stage3.py](/configs/opensora-v1-1/train/stage3.py)). We trained **4k** steps in **one day** on high-quality data. We find that loading the previous stage's optimizer state helps the model learn faster.
102
+
103
+ To summarize, the training of Open-Sora 1.1 requires approximately **9 days** on 64 H800 GPUs.
104
+
105
+ ## Limitation and Future Work
106
+
107
+ As we get one step closer to replicating Sora, we find many limitations in the current model, and these limitations point to future work.
108
+
109
+ - **Generation failure**: we find many cases (especially when the total token number is large or the content is complex) where our model fails to generate the scene. There may be a collapse in the temporal attention, and we have identified a potential bug in our code. We are working hard to fix it. Besides, we will increase our model size and training data to improve the generation quality in the next version.
110
+ - **Noisy and unfluent generation**: we find the generated videos are sometimes noisy and not fluent, especially long ones. We think the problem is due to not using a temporal VAE. As [Pixart-Sigma](https://arxiv.org/abs/2403.04692) finds that adapting to a new VAE is simple, we plan to develop a temporal VAE for the model in the next version.
111
+ - **Lack of temporal consistency**: we find the model cannot generate videos with high temporal consistency. We think the problem is due to the lack of training FLOPs. We plan to collect more data and continue training the model to improve temporal consistency.
112
+ - **Bad human generation**: We find the model cannot generate high-quality human videos. We think the problem is due to the lack of human data. We plan to collect more human data and continue training the model to improve the human generation.
113
+ - **Low aesthetic score**: we find the model's aesthetic score is not high. The problem is due to the lack of aesthetic score filtering, which was not conducted due to the IO bottleneck. We plan to filter the data by aesthetic score and fine-tune the model to improve it.
114
+ - **Worse quality for longer video generation**: we find that with the same prompt, longer videos have worse quality. This means the image quality does not transfer equally to different sequence lengths.
115
+
116
+ > - **Algorithm & Acceleration**: Zangwei Zheng, Xiangyu Peng, Shenggui Li, Hongxing Liu, Yukun Zhou, Tianyi Li
117
+ > - **Data Collection & Pipeline**: Xiangyu Peng, Zangwei Zheng, Chenhui Shen, Tom Young, Junjie Wang, Chenfeng Yu
exp_code/1_benchmark/Open-Sora_v12/docs/report_03.md ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora 1.2 Report
2
+
3
+ - [Video compression network](#video-compression-network)
4
+ - [Rectified flow and model adaptation](#rectified-flow-and-model-adaptation)
5
+ - [More data and better multi-stage training](#more-data-and-better-multi-stage-training)
6
+ - [Easy and effective model conditioning](#easy-and-effective-model-conditioning)
7
+ - [Evaluation](#evaluation)
8
+ - [Sequence parallelism](#sequence-parallelism)
9
+
10
+ In the Open-Sora 1.2 release, we train a 1.1B model on >30M data (\~80k hours), with a training cost of 35k H100 GPU hours, supporting 0s\~16s, 144p to 720p, various-aspect-ratio video generation. Our configurations are listed below. Following our 1.1 version, Open-Sora 1.2 can also do image-to-video generation and video extension.
11
+
12
+ | | image | 2s | 4s | 8s | 16s |
13
+ | ---- | ----- | --- | --- | --- | --- |
14
+ | 240p | ✅ | ✅ | ✅ | ✅ | ✅ |
15
+ | 360p | ✅ | ✅ | ✅ | ✅ | ✅ |
16
+ | 480p | ✅ | ✅ | ✅ | ✅ | 🆗 |
17
+ | 720p | ✅ | ✅ | ✅ | 🆗 | 🆗 |
18
+
19
+ Here ✅ means that this setting is seen during training, and 🆗 means that although it is not trained on, the model can run inference at that setting. Inference for 🆗 requires more than one 80GB-memory GPU and sequence parallelism.
20
+
21
+ Besides features introduced in Open-Sora 1.1, Open-Sora 1.2 highlights:
22
+
23
+ - Video compression network
24
+ - Rectified-flow training
25
+ - More data and better multi-stage training
26
+ - Easy and effective model conditioning
27
+ - Better evaluation metrics
28
+
29
+ All implementations (both training and inference) of the above improvements are available in the Open-Sora 1.2 release. The following sections introduce the details of the improvements. We also refine our codebase and documentation to make it easier to use and develop, and add an LLM to [refine input prompts](/README.md#gpt-4o-prompt-refinement) and support more languages.
30
+
31
+ ## Video compression network
32
+
33
+ For Open-Sora 1.0 & 1.1, we used Stability AI's 83M 2D VAE, which compresses the video only in the spatial dimension, by 8x8. To reduce the temporal dimension, we extracted one out of every three frames. However, this method led to low fluency in the generated videos as the effective fps was sacrificed. Thus, in this release, we introduce a video compression network as OpenAI's Sora does. With 4x compression in the temporal dimension, we no longer need to drop frames and can generate videos at the original fps.
34
+
35
+ Considering the high computational cost of training a 3D VAE, we hope to reuse the knowledge learned by the 2D VAE. We notice that after the 2D VAE's compression, features adjacent in the temporal dimension are still highly correlated. Thus, we propose a simple video compression network, which first compresses the video in the spatial dimension by 8x8, then compresses it in the temporal dimension by 4x. The network is shown below:
36
+
37
+ ![video_compression_network](/assets/readme/report_3d_vae.png)
38
+
39
+ We initialize the 2D VAE with [SDXL's VAE](https://huggingface.co/stabilityai/sdxl-vae), which is better than the one we used previously. For the 3D VAE, we adopt the VAE structure of [Magvit-v2](https://magvit.cs.cmu.edu/v2/), which contains 300M parameters. Together with the 83M 2D VAE, the video compression network has 384M parameters in total. We train the 3D VAE for 1.2M steps with a local batch size of 1. The training data is videos from Pexels and Pixabay, and the training videos are mainly 17 frames at 256x256 resolution. Causal convolutions are used in the 3D VAE to make image reconstruction more accurate.
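+ For intuition, the latent shape of such a 17-frame, 256x256 clip works out roughly as follows (a sketch; the exact temporal output depends on the causal padding, so treat the temporal formula as an approximation):
+
+ ```python
+ T, H, W = 17, 256, 256
+ h, w = H // 8, W // 8   # 2D VAE: 8x8 spatial compression -> 32 x 32 per frame
+ t = 1 + (T - 1) // 4    # 3D VAE: ~4x temporal compression; the causal first frame is kept
+ print(t, h, w)          # 5 32 32
+ ```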
40
+
41
+ Our training involves three stages:
42
+
43
+ 1. For the first 380k steps, we train on 8 GPUs and freeze the 2D VAE. The training objective includes reconstructing the compressed features from the 2D VAE (the pink one in the figure) plus a loss that makes the 3D VAE's features similar to the 2D VAE's features (pink and green, called the identity loss). We find the latter loss quickly brings the whole VAE to good image performance and makes convergence in the next stage much faster.
44
+ 2. For the next 260k steps, we remove the identity loss and train only the 3D VAE.
45
+ 3. For the last 540k steps, since we find that only reconstructing the 2D VAE's features cannot bring further improvement, we remove that loss and train the whole VAE to reconstruct the original videos. This stage is trained on 24 GPUs.
46
+
47
+ For both stage 1 and stage 2 training, we use 20% images and 80% videos. Following [Magvit-v2](https://magvit.cs.cmu.edu/v2/), we train videos using 17 frames, while zero-padding the first 16 frames for images. However, we find that this setting leads to blurring for videos whose length differs from 17 frames. Thus, in stage 3, we use a random length within 34 frames for mixed video-length training (i.e., zero-pad the first `34-n` frames if we want to train an `n`-frame video), to make our VAE more robust to different video lengths. Our [training](/scripts/train_vae.py) and [inference](/scripts/inference_vae.py) code is available in the Open-Sora 1.2 release.
48
+
49
+ When used with the diffusion model, our stacked VAE requires little memory, since its 3D part takes already-compressed inputs. We also split input videos into several 17-frame clips to make inference more efficient. The performance of our VAE is on par with the open-sourced 3D VAE in [Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan/blob/main/docs/Report-v1.1.0.md).
50
+
51
+ | Model | SSIM↑ | PSNR↑ |
52
+ | ------------------ | ----- | ------ |
53
+ | Open-Sora-Plan 1.1 | 0.882 | 29.890 |
54
+ | Open-Sora 1.2 | 0.880 | 30.590 |
55
+
56
+ ## Rectified flow and model adaptation
57
+
58
+ The latest diffusion models like Stable Diffusion 3 adopt [rectified flow](https://github.com/gnobitab/RectifiedFlow) instead of DDPM for better performance. Unfortunately, SD3's rectified flow training code is not open-sourced. However, Open-Sora 1.2 provides training code following SD3's paper, including:
59
+
60
+ - Basic rectified flow training ([original rectified flow paper](https://arxiv.org/abs/2209.03003))
61
+ - Logit-norm sampling for training acceleration ([SD3 paper](https://arxiv.org/pdf/2403.03206) Section 3.1, intuitively it is more likely to sample timesteps at middle noise level)
62
+ - Resolution and video length aware timestep sampling ([SD3 paper](https://arxiv.org/pdf/2403.03206) Section 5.3.2, intuitively it is more likely to sample timesteps with more noise for larger resolution, and we extend it to longer video)
63
+
64
+ For resolution-aware timestep sampling, we use more noise for images with larger resolutions. We extend this idea to video generation and use more noise for longer videos.
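+ As a rough illustration, logit-normal sampling draws a Gaussian variable and squashes it through a sigmoid, and the resolution/length-aware variant then shifts the result towards the high-noise end for larger inputs. The shift form below is one common SD3-style choice and should be treated as an assumption, not necessarily the exact formula in our code:
+
+ ```python
+ import math
+ import random
+
+ def sample_timestep(num_tokens, base_tokens=1024, mean=0.0, std=1.0):
+     # logit-normal sampling: concentrates timesteps around middle noise levels
+     t = 1.0 / (1.0 + math.exp(-random.gauss(mean, std)))
+     # shift towards more noise for larger resolutions / longer videos (assumed SD3-style form)
+     shift = math.sqrt(num_tokens / base_tokens)
+     return shift * t / (1.0 + (shift - 1.0) * t)
+ ```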
65
+
66
+ Open-Sora 1.2 starts from the [PixArt-Σ 2K](https://github.com/PixArt-alpha/PixArt-sigma) checkpoint. Note that this model was trained with DDPM, the SDXL VAE, and a much higher resolution. We find that fine-tuning on a small dataset can easily adapt the model to our video generation setting. The adaptation process is as follows; all training is done on 8 GPUs (the adaptation of the diffusion model is quite fast and straightforward):
67
+
68
+ 1. Multi-resolution image generation ability: we train the model to generate images at different resolutions ranging from 144p to 2K for 20k steps.
69
+ 2. QK-norm: we add the QK-norm to the model and train for 18k steps.
70
+ 3. Rectified flow: we transform from discrete-time DDPM to continuous-time rectified flow and train for 10k steps.
71
+ 4. Rectified flow with logit-norm sampling and resolution-aware timestep sampling: we train for 33k steps.
72
+ 5. Smaller AdamW epsilon: following SD3, with QK-norm we can use a smaller epsilon (1e-15) for AdamW; we train for 8k steps.
73
+ 6. New VAE and fps conditioning: we replace the original VAE with ours and add fps conditioning to the timestep conditioning; we train for 25k steps. Note that normalizing each channel is important for rectified flow training.
74
+ 7. Temporal attention blocks: we add temporal attention blocks with zero initialized projection layers. We train on images for 3k steps.
75
+ 8. Temporal blocks only for video with mask strategy: we train the temporal attention blocks only on videos for 38k steps.
76
+
77
+ After the above adaptation, we are ready to train the model on videos. The adaptation above maintains the original model's ability to generate high-quality images, and brings multiple benefits for video generation:
78
+
79
+ - With rectified flow, we can accelerate the training and reduce the number of sampling steps for video from 100 to 30, which greatly reduces the waiting time for inference.
80
+ - With QK-norm, the training is more stable and a more aggressive optimizer can be used.
81
+ - With new VAE, the temporal dimension is compressed by 4 times, which makes the training more efficient.
82
+ - With multi-resolution image generation ability, the model can generate videos with different resolutions.
83
+
84
+ ## More data and better multi-stage training
85
+
86
+ Due to a limited computational budget, we carefully arrange the training data from low to high quality and split our training into three stages. Our training involves 12x8 GPUs, and the total training time is about 2 weeks for about 70k steps.
87
+
88
+ ### First stage
89
+
90
+ We first train the model on the Webvid-10M dataset (40k hours) for 30k steps (2 epochs). Since the videos are all below 360p resolution and contain watermarks, we train on this dataset first. The training mainly happens at 240p and 360p, with video lengths of 2s~16s. We use the original captions in the dataset for training. The training config is located in [stage1.py](/configs/opensora-v1-2/train/stage1.py).
91
+
92
+ ### Second stage
93
+
94
+ Then we train the model on the Panda-70M dataset. This dataset is large but of varying quality. We use the official 30M subset, whose clips are more diverse, and filter out videos with an aesthetic score lower than 4.5. This leads to a 20M subset with 41k hours. The captions in the dataset are used directly for our training. The training config is located in [stage2.py](/configs/opensora-v1-2/train/stage2.py).
95
+
96
+ The training mainly happens at 360p and 480p. We train the model for 23k steps, which is 0.5 epoch. The training is not fully finished, since we hope our new model can meet you earlier.
97
+
98
+ ### Third stage
99
+
100
+ In this stage, we collect ~2M video clips with a total length of 5K hours from all kinds of sources, including:
101
+
102
+ - Free-license videos, sourced from Pexels, Pixabay, Mixkit, etc.
103
+ - [MiraData](https://github.com/mira-space/MiraData): a high-quality dataset with long videos, mainly from games and city/scenic exploration.
104
+ - [Vript](https://github.com/mutonix/Vript/tree/main): a densely annotated dataset.
105
+ - And some other datasets.
106
+
107
+ While MiraData and Vript come with GPT-generated captions, we use [PLLaVA](https://github.com/magic-research/PLLaVA) to caption the rest. Compared with LLaVA, which only handles single frame/image captioning, PLLaVA is specially designed and trained for video captioning. The [accelerated PLLaVA](/tools/caption/README.md#pllava-captioning) is released in our `tools/`. In practice, we use the pretrained PLLaVA 13B model and select 4 frames from each video for captioning, with a spatial pooling shape of 2x2.
108
+
109
+ Some statistics of the video data used in this stage are shown below. We present basic statistics of duration and resolution, as well as aesthetic score and optical flow score distribution.
110
+ We also extract tags for objects and actions from video captions and count their frequencies.
111
+ ![stats](/assets/readme/report-03_video_stats.png)
112
+ ![object_count](/assets/readme/report-03_objects_count.png)
113
+ ![action_count](/assets/readme/report-03_actions_count.png)
114
+
115
+ We mainly train on 720p and 1080p videos in this stage, aiming to extend the model's ability to larger resolutions. We use a mask ratio of 25% during training. The training config is located in [stage3.py](/configs/opensora-v1-2/train/stage3.py). We train the model for 15k steps, which is approximately 2 epochs.
116
+
117
+ ## Easy and effective model conditioning
118
+
119
+ For stage 3, we calculate an aesthetic score and a motion score for each video clip. However, since the number of video clips is small, we do not want to filter out clips with low scores, which would lead to an even smaller dataset. Instead, we append the scores to the captions and use them as conditioning. We find this method makes the model aware of the scores and follow them to generate videos of better quality.
120
+
121
+ For example, for a video with an aesthetic score of 5.5, a motion score of 10, and a detected camera motion of "pan left", the caption will be:
122
+
123
+ ```plaintext
124
+ [Original Caption] aesthetic score: 5.5, motion score: 10, camera motion: pan left.
125
+ ```
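+ A minimal sketch of how such a conditioned caption could be assembled (the function name and prompt are purely illustrative):
+
+ ```python
+ def condition_caption(caption, aes, motion, camera=None):
+     # append the scores (and camera motion, if detected) to the original caption
+     extra = f" aesthetic score: {aes}, motion score: {motion}"
+     if camera is not None:
+         extra += f", camera motion: {camera}"
+     return caption + extra + "."
+
+ print(condition_caption("A boat sailing on a calm lake", 5.5, 10, "pan left"))
+ ```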
126
+
127
+ During inference, we can also use the scores to condition the model. For camera motion, we only label 13k clips with high confidence, and the camera motion detection module is released in our tools.
128
+
129
+ ## Evaluation
130
+
131
+ Previously, we monitored the training process only by human evaluation, as the DDPM training loss is not well correlated with the quality of generated videos. However, for rectified flow, we find the training loss is well correlated with generation quality, as stated in SD3. Thus, we keep track of the rectified-flow evaluation loss on 100 images and 1k videos.
132
+
133
+ We sampled 1k videos from Pixabay as the validation dataset. We calculate the evaluation loss for images and for videos of different lengths (2s, 4s, 8s, 16s) at different resolutions (144p, 240p, 360p, 480p, 720p). For each setting, we equidistantly sample 10 timesteps, and all the losses are averaged. We also provide a [video](https://streamable.com/oqkkf1) showing the sampled videos for a fixed prompt at different training steps.
134
+
135
+ ![Evaluation Loss](/assets/readme/report_val_loss.png)
136
+ ![Video Evaluation Loss](/assets/readme/report_vid_val_loss.png)
137
+
138
+ In addition, we also keep track of [VBench](https://vchitect.github.io/VBench-project/) scores during training. VBench is an automatic video evaluation benchmark for short video generation. We calculate the VBench score with 240p 2s videos. The two metrics verify that our model continues to improve during training.
139
+
140
+ ![VBench](/assets/readme/report_vbench_score.png)
141
+
142
+ All the evaluation code is released in the `eval` folder. Check the [README](/eval/README.md) for more details.
143
+
144
+ | Model | Total Score | Quality Score | Semantic Score |
145
+ | -------------- | ----------- | ------------- | -------------- |
146
+ | Open-Sora V1.0 | 75.91% | 78.81% | 64.28% |
147
+ | Open-Sora V1.2 | 79.23% | 80.71% | 73.30% |
148
+
149
+ ## Sequence parallelism
150
+
151
+ We use sequence parallelism to support long-sequence training and inference. Our implementation is based on Ulysses and the workflow is shown below. When sequence parallelism is enabled, we only need to apply the `all-to-all` communication to the spatial block in STDiT as only spatial computation is dependent on the sequence dimension.
152
+
153
+ ![SP](../assets/readme/sequence_parallelism.jpeg)
154
+
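+ The actual sequence-parallel attention lives in the repository's acceleration code; the snippet below is only a minimal sketch of the Ulysses-style `all-to-all` primitive it relies on, re-sharding activations from a sequence-sharded layout `[B, S/P, H, D]` to a head-sharded layout `[B, S, H/P, D]` before spatial attention and back afterwards. All names are illustrative.
+
+ ```python
+ # Minimal sketch of the Ulysses all-to-all re-sharding (illustrative, not the repo's implementation).
+ import torch
+ import torch.distributed as dist
+
+
+ def seq_all_to_all(x: torch.Tensor, scatter_dim: int, gather_dim: int, group=None) -> torch.Tensor:
+     """Scatter `scatter_dim` across the sequence-parallel group and gather `gather_dim`."""
+     world_size = dist.get_world_size(group)
+     # Split the dimension to be scattered into one chunk per rank.
+     inputs = [t.contiguous() for t in torch.chunk(x, world_size, dim=scatter_dim)]
+     outputs = [torch.empty_like(inputs[0]) for _ in range(world_size)]
+     dist.all_to_all(outputs, inputs, group=group)
+     # Reassemble the gathered dimension from the chunks received from every rank.
+     return torch.cat(outputs, dim=gather_dim)
+
+
+ # Before spatial attention: [B, S/P, H, D] -> [B, S, H/P, D] (full sequence, a slice of heads)
+ # q = seq_all_to_all(q, scatter_dim=2, gather_dim=1)
+ # After attention: invert the exchange to restore the sequence-sharded layout
+ # out = seq_all_to_all(out, scatter_dim=1, gather_dim=2)
+ ```
+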
155
+ Currently, we have not used sequence parallelism for training, as the data resolution is small; we plan to do so in the next release. For inference, you can use sequence parallelism in case your GPU runs out of memory. A simple benchmark shows the speedup sequence parallelism can achieve:
156
+
157
+ | Resolution | Duration | Number of GPUs | Enable SP | Time taken (s) | Speedup per GPU |
158
+ | ---------- | ------- | -------------- | --------- | ------------ | --------------- |
159
+ | 720p | 16s | 1 | No | 547.97 | - |
160
+ | 720p | 16s | 2 | Yes | 244.38 | 12% |
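+
+ Here, the per-GPU speedup is presumably derived as (single-GPU time) / (number of GPUs × multi-GPU time): 547.97 / (2 × 244.38) ≈ 1.12, i.e., the 2-GPU run is about 2.24× faster end-to-end, roughly 12% better than ideal linear scaling.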
exp_code/1_benchmark/Open-Sora_v12/docs/structure.md ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Repo Structure
2
+
3
+ ```plaintext
4
+ Open-Sora
5
+ ├── README.md
6
+ ├── assets
7
+ │ ├── images -> images used for image-conditioned generation
8
+ │ ├── demo -> images used for demo
9
+ │ ├── texts -> prompts used for text-conditioned generation
10
+ │ └── readme -> images used in README
11
+ ├── configs -> Configs for training & inference
12
+ ├── docker -> dockerfile for Open-Sora
13
+ ├── docs
14
+ │ ├── acceleration.md -> Report on acceleration & speed benchmark
15
+ │ ├── commands.md -> Commands for training & inference
16
+ │ ├── datasets.md -> Datasets used in this project
17
+ | ├── data_processing.md -> Data pipeline documents
18
+ | ├── installation.md -> Data pipeline documents
19
+ │ ├── structure.md -> This file
20
+ │ ├── config.md -> Configs for training and inference
21
+ │ ├── report_01.md -> Report for Open-Sora 1.0
22
+ │ ├── report_02.md -> Report for Open-Sora 1.1
23
+ │ ├── report_03.md -> Report for Open-Sora 1.2
24
+ │ ├── vae.md -> our VAE report
25
+ │ └── zh_CN -> Chinese version of the above
26
+ ├── eval -> Evaluation scripts
27
+ │ ├── README.md -> Evaluation documentation
28
+ | ├── human_eval -> for human eval
29
+ | ├── launch.sh -> script for launching 8 cards sampling
30
+ | ├── loss -> eval loss
31
+ | ├── sample.sh -> script for quickly launching inference on predefined prompts
32
+ | ├── vae -> for vae eval
33
+ | ├── vbench -> for VBench evaluation
34
+ │ └── vbench_i2v -> for VBench i2v evaluation
35
+ ├── gradio -> Gradio demo related code
36
+ ├── notebooks -> Jupyter notebooks for generating commands to run
37
+ ├── scripts
38
+ │ ├── train.py -> diffusion training script
39
+ │ ├── train_vae.py -> vae training script
40
+ │ ├── inference.py -> diffusion inference script
41
+ │ ├── inference_vae.py -> vae inference script
42
+ │ └── misc -> misc scripts, including batch size search
43
+ ├── opensora
44
+ │ ├── __init__.py
45
+ │ ├── registry.py -> Registry helper
46
+ │   ├── acceleration -> Acceleration related code
47
+ │   ├── datasets -> Dataset related code
48
+ │   ├── models
49
+ │   │   ├── dit -> DiT
50
+ │   │   ├── layers -> Common layers
51
+ │   │   ├── vae -> VAE as image encoder
52
+ │   │   ├── text_encoder -> Text encoder
53
+ │   │   │   ├── classes.py -> Class id encoder (inference only)
54
+ │   │   │   ├── clip.py -> CLIP encoder
55
+ │   │   │   └── t5.py -> T5 encoder
56
+ │   │   ├── dit
57
+ │   │   ├── latte
58
+ │   │   ├── pixart
59
+ │   │   └── stdit -> Our STDiT related code
60
+ │   ├── schedulers -> Diffusion schedulers
61
+ │   │   ├── iddpm -> IDDPM for training and inference
62
+ │   │ └── dpms -> DPM-Solver for fast inference
63
+ │ └── utils
64
+ ├── tests -> Tests for the project
65
+ └── tools -> Tools for data processing and more
66
+ ```
67
+
68
+ ## Configs
69
+
70
+ Our config files follow [MMEngine](https://github.com/open-mmlab/mmengine). MMEngine reads the config file (a `.py` file) and parses it into a dictionary-like object, as sketched below.
71
+
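+ For example, a config can be loaded and overridden programmatically with MMEngine's `Config` API. This is a minimal sketch of the MMEngine-style loading rather than the repository's exact entry point; the path below is just one config from the tree that follows.
+
+ ```python
+ # Minimal sketch: load an MMEngine-style Python config and tweak it (requires `mmengine`).
+ from mmengine.config import Config
+
+ cfg = Config.fromfile("configs/opensora/train/16x256x256.py")
+ print(cfg.model)       # nested options behave like attributes / dict entries
+ cfg.num_frames = 32    # values can be overridden before launching training
+ ```
+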
72
+ ```plaintext
73
+ Open-Sora
74
+ └── configs -> Configs for training & inference
75
+ ├── opensora-v1-1 -> STDiT2 related configs
76
+ │ ├── inference
77
+ │ │ ├── sample.py -> Sample videos and images
78
+ │ │ └── sample-ref.py -> Sample videos with image/video condition
79
+ │ └── train
80
+ │ ├── stage1.py -> Stage 1 training config
81
+ │ ├── stage2.py -> Stage 2 training config
82
+ │ ├── stage3.py -> Stage 3 training config
83
+ │ ├── image.py -> Illustration of image training config
84
+ │ ├── video.py -> Illustration of video training config
85
+ │ └── benchmark.py -> For batch size searching
86
+ ├── opensora -> STDiT related configs
87
+ │ ├── inference
88
+ │ │ ├── 16x256x256.py -> Sample videos 16 frames 256x256
89
+ │ │ ├── 16x512x512.py -> Sample videos 16 frames 512x512
90
+ │ │ └── 64x512x512.py -> Sample videos 64 frames 512x512
91
+ │ └── train
92
+ │ ├── 16x256x256.py -> Train on videos 16 frames 256x256
93
+ │ ├── 16x512x512.py -> Train on videos 16 frames 512x512
94
+ │ └── 64x512x512.py -> Train on videos 64 frames 512x512
95
+ ├── dit -> DiT related configs
96
+    │   ├── inference
97
+    │   │   ├── 1x256x256-class.py -> Sample images with ckpts from DiT
98
+    │   │   ├── 1x256x256.py -> Sample images with clip condition
99
+    │   │   └── 16x256x256.py -> Sample videos
100
+    │   └── train
101
+    │     ├── 1x256x256.py -> Train on images with clip condition
102
+    │      └── 16x256x256.py -> Train on videos
103
+ ├── latte -> Latte related configs
104
+ └── pixart -> PixArt related configs
105
+ ```
106
+
107
+ ## Tools
108
+
109
+ ```plaintext
110
+ Open-Sora
111
+ └── tools
112
+ ├── datasets -> dataset management related code
113
+ ├── scene_cut -> scene cut related code
114
+ ├── caption -> caption related code
115
+ ├── scoring -> scoring related code
116
+ │ ├── aesthetic -> aesthetic scoring related code
117
+ │ ├── matching -> matching scoring related code
118
+ │ ├── ocr -> ocr scoring related code
119
+ │ └── optical_flow -> optical flow scoring related code
120
+ └── frame_interpolation -> frame interpolation related code
exp_code/1_benchmark/Open-Sora_v12/docs/vae.md ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # VAE Report
2
+
3
+ As [Pixart-Sigma](https://arxiv.org/abs/2403.04692) finds that adapting to a new VAE is simple, we develop an additional temporal VAE.
4
+ Specifically, our VAE consists of a pipeline of a [spatial VAE](https://huggingface.co/PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers) followed by a temporal VAE.
5
+ For the temporal VAE, we follow the implementation of [MAGVIT-v2](https://arxiv.org/abs/2310.05737), with the following modifications:
6
+
7
+ * We remove the architecture specific to the codebook.
8
+ * We do not use a discriminator; instead, we train with the VAE reconstruction loss, KL loss, and perceptual loss.
9
+ * In the last linear layer of the encoder, we scale down to a diagonal Gaussian distribution with 4 channels, following our previously trained STDiT, which takes 4-channel inputs (see the sampling sketch after this list).
10
+ * Our decoder is symmetric to the encoder architecture.
11
+
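+ The 4-channel diagonal Gaussian is sampled with the usual reparameterization trick. Below is a generic sketch that assumes the encoder emits 4 mean plus 4 log-variance channels; the split and clamping values are illustrative, not the exact module layout.
+
+ ```python
+ # Generic sketch: sample a 4-channel latent from a diagonal Gaussian encoder output.
+ import torch
+
+
+ def sample_diagonal_gaussian(encoder_out: torch.Tensor):
+     # encoder_out: [B, 8, T, H, W] -> 4 mean channels + 4 log-variance channels (assumed split)
+     mean, logvar = torch.chunk(encoder_out, 2, dim=1)
+     logvar = torch.clamp(logvar, -30.0, 20.0)
+     std = torch.exp(0.5 * logvar)
+     z = mean + std * torch.randn_like(mean)  # reparameterization trick
+     # KL divergence to a standard normal, used as the KL term of the training loss.
+     kl = 0.5 * torch.sum(mean**2 + std**2 - 1.0 - logvar, dim=1).mean()
+     return z, kl
+ ```
+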
12
+ ## Training
13
+
14
+ We train the model in different stages.
15
+
16
+ We first train only the temporal VAE, keeping the spatial VAE frozen, for 380k steps on a single machine (8 GPUs).
17
+ We use an additional identity loss to make features from the 3D VAE similar to the features from the 2D VAE.
18
+ We train the VAE using 20% images and 80% videos with 17 frames.
19
+
20
+ ```bash
21
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage1.py --data-path YOUR_CSV_PATH
22
+ ```
23
+
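+ The identity loss mentioned above is conceptually a feature-matching term against the frozen spatial VAE; a minimal sketch with placeholder names and illustrative weights (not the actual training code) looks like:
+
+ ```python
+ # Minimal sketch of the stage-1 identity loss (placeholder names, illustrative weights).
+ import torch
+ import torch.nn.functional as F
+
+
+ def identity_loss(features_3d: torch.Tensor, latents_2d: torch.Tensor) -> torch.Tensor:
+     # Pull the temporal VAE's features toward the frozen spatial VAE latents,
+     # so the combined pipeline starts out close to an identity mapping.
+     return F.mse_loss(features_3d, latents_2d.detach())
+
+
+ # total_loss = recon_loss + kl_weight * kl_loss + perc_weight * lpips_loss \
+ #              + id_weight * identity_loss(features_3d, latents_2d)
+ ```
+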
24
+ Next, we remove the identity loss and train the 3D VAE pipeline to reconstruct the 2D-compressed videos for 260k steps.
25
+
26
+ ```bash
27
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage2.py --data-path YOUR_CSV_PATH
28
+ ```
29
+
30
+ Finally, we remove the reconstruction loss on the 2D-compressed videos and train the VAE pipeline to reconstruct the original videos end-to-end for 540k steps.
31
+ We train our VAE with a random number of frames (up to 34) to make it more robust to different video lengths.
32
+ This stage is trained on 24 GPUs.
33
+
34
+ ```bash
35
+ torchrun --nnodes=3 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage3.py --data-path YOUR_CSV_PATH
36
+ ```
37
+
38
+ Note that you need to adjust `epochs` in the config file according to the size of your own CSV dataset.
39
+
40
+ ## Inference
41
+
42
+ To visually check the performance of the VAE, you may run the following inference command.
43
+ It saves the original video to your specified video directory with `_ori` postfix (i.e. `"YOUR_VIDEO_DIR"_ori`), the reconstructed video from the full pipeline with the `_rec` postfix (i.e. `"YOUR_VIDEO_DIR"_rec`), and the reconstructed video from the 2D compression and decompression with the `_spatial` postfix (i.e. `"YOUR_VIDEO_DIR"_spatial`).
44
+
45
+ ```bash
46
+ torchrun --standalone --nnodes=1 --nproc_per_node=1 scripts/inference_vae.py configs/vae/inference/video.py --ckpt-path YOUR_VAE_CKPT_PATH --data-path YOUR_CSV_PATH --save-dir YOUR_VIDEO_DIR
47
+ ```
48
+
+ ## Evaluation
49
+
50
+ We can then calculate the VAE's performance scores on the SSIM, PSNR, LPIPS, and FloLPIPS metrics.
51
+
52
+ * SSIM: structural similarity index measure, the higher the better
53
+ * PSNR: peak signal-to-noise ratio, the higher the better (a minimal computation sketch follows this list)
54
+ * LPIPS: learned perceptual image patch similarity, the lower the better
55
+ * [FloLPIPS](https://arxiv.org/pdf/2207.08119): LPIPS with video interpolation, the lower the better.
56
+
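+ PSNR is simple enough to state explicitly; a minimal sketch for frames scaled to `[0, 1]` is shown below, while the script that follows computes all four metrics.
+
+ ```python
+ # Minimal sketch: PSNR in dB between two frames/videos scaled to [0, max_val].
+ import numpy as np
+
+
+ def psnr(x: np.ndarray, y: np.ndarray, max_val: float = 1.0) -> float:
+     mse = np.mean((x.astype(np.float64) - y.astype(np.float64)) ** 2)
+     if mse == 0:
+         return float("inf")  # identical inputs
+     return 10.0 * np.log10(max_val**2 / mse)
+ ```
+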
57
+ ```bash
58
+ python eval/vae/eval_common_metric.py --batch_size 2 --real_video_dir YOUR_VIDEO_DIR_ori --generated_video_dir YOUR_VIDEO_DIR_rec --device cuda --sample_fps 24 --crop_size 256 --resolution 256 --num_frames 17 --sample_rate 1 --metric ssim psnr lpips flolpips
59
+ ```
60
+
61
+ ## Acknowledgement
62
+ We are grateful for the following work:
63
+ * [MAGVIT-v2](https://arxiv.org/abs/2310.05737): Language Model Beats Diffusion -- Tokenizer is Key to Visual Generation
64
+ * [Taming Transformers](https://github.com/CompVis/taming-transformers): Taming Transformers for High-Resolution Image Synthesis
65
+ * [3D blur pooling](https://github.com/adobe/antialiased-cnns/pull/39/commits/3d6f02b6943c58b68c19c07bc26fad57492ff3bc)
66
+ * [Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan)
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/README.md ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <p align="center">
2
+ <img src="../../assets/readme/icon.png" width="250"/>
3
+ </p>
4
+ <div align="center">
5
+ <a href="https://github.com/hpcaitech/Open-Sora/stargazers"><img src="https://img.shields.io/github/stars/hpcaitech/Open-Sora?style=social"></a>
6
+ <a href="https://hpcaitech.github.io/Open-Sora/"><img src="https://img.shields.io/badge/Gallery-View-orange?logo=&amp"></a>
7
+ <a href="https://discord.gg/kZakZzrSUT"><img src="https://img.shields.io/badge/Discord-join-blueviolet?logo=discord&amp"></a>
8
+ <a href="https://join.slack.com/t/colossalaiworkspace/shared_invite/zt-247ipg9fk-KRRYmUl~u2ll2637WRURVA"><img src="https://img.shields.io/badge/Slack-ColossalAI-blueviolet?logo=slack&amp"></a>
9
+ <a href="https://twitter.com/yangyou1991/status/1769411544083996787?s=61&t=jT0Dsx2d-MS5vS9rNM5e5g"><img src="https://img.shields.io/badge/Twitter-Discuss-blue?logo=twitter&amp"></a>
10
+ <a href="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/WeChat.png"><img src="https://img.shields.io/badge/微信-小助手加群-green?logo=wechat&amp"></a>
11
+ <a href="https://hpc-ai.com/blog/open-sora-v1.0"><img src="https://img.shields.io/badge/Open_Sora-Blog-blue"></a>
12
+ <a href="https://huggingface.co/spaces/hpcai-tech/open-sora"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Gradio Demo-blue"></a>
13
+ </div>
14
+
15
+ ## Open-Sora: 让所有人都能轻松制作高效视频
16
+
17
+ 我们设计并实施了**Open-Sora**,这是一项致力于高效制作高质量视频的计划。我们希望让所有人都能使用模型、工具和所有细节。通过采用开源原则,Open-Sora 不仅使高级视频生成技术的使用变得民主化,而且还提供了一个简化且用户友好的平台,简化了视频生成的复杂性。借助 Open-Sora,我们的目标是在内容创作领域促进创新、创造力和包容性。
18
+
19
+ [[中文文档](/docs/zh_CN/README.md)] [[潞晨云](https://cloud.luchentech.com/)|[OpenSora镜像](https://cloud.luchentech.com/doc/docs/image/open-sora/)|[视频教程](https://www.bilibili.com/video/BV1ow4m1e7PX/?vd_source=c6b752764cd36ff0e535a768e35d98d2)]
20
+
21
+ ## 📰 资讯
22
+
23
+ * **[2024.06.22]** 🔥我们在[潞晨云](https://cloud.luchentech.com/)上发布了Open-Sora1.2镜像,并在B站上传了详细的[使用教程](https://www.bilibili.com/video/BV1ow4m1e7PX/)
24
+ * **[2024.06.17]** 🔥我们发布了**Open-Sora 1.2**,其中包括**3D-VAE**,**整流流**和**得分条件**。视频质量大大提高。[[模型权重]](#模型权重) [[技术报告]](report_v3.md) [[公众号文章]](https://mp.weixin.qq.com/s/QHq2eItZS9e00BVZnivdjg)
25
+ * **[2024.04.25]** 🤗 我们在 Hugging Face Spaces 上发布了 [Open-Sora的Gradio演示](https://huggingface.co/spaces/hpcai-tech/open-sora)。
26
+ * **[2024.04.25]** 我们发布了**Open-Sora 1.1**,支持**2s~15s、144p 到 720p、任意比例的文本转图片、文本转视频、图片转视频、视频转视频、无限时间生成**。此外,还发布了完整的视频处理管道。 [[模型权重]](#模型权重) [[技术报告]](report_v2.md)[[公众号文章]](https://mp.weixin.qq.com/s/nkPSTep2se__tzp5OfiRQQ)
27
+ * **[2024.03.18]** 我们发布了 **Open-Sora 1.0**, 一个完全开源的视频生成项目。Open-Sora 1.0 支持完整的视频数据预处理流程、加速训练
28
+ <a href="https://github.com/hpcaitech/ColossalAI"><img src="/assets/readme/colossal_ai.png" width="8%" ></a>
29
+ 、推理等。我们的模型只需 3 天的训练就可以生成 2 秒的 512x512 视频。 [[模型权重]](#模型权重)
30
+ [[公众号文章]](https://mp.weixin.qq.com/s/H52GW8i4z1Dco3Sg--tCGw) [[技术报告]](report_v1.md)
31
+ * **[2024.03.04]** Open-Sora 提供培训,成本降低 46%。
32
+ [[公众号文章]](https://mp.weixin.qq.com/s/OjRUdrM55SufDHjwCCAvXg)
33
+
34
+ ## 🎥 Latest Demo
35
+
36
+ 🔥 您可以在HuggingFace上的 [🤗 Gradio应用程序](https://huggingface.co/spaces/hpcai-tech/open-sora)上体验Open-Sora. 我们的[画廊](https://hpcaitech.github.io/Open-Sora/)中提供了更多示例.
37
+
38
+ | **4s 720×1280** | **4s 720×1280** | **4s 720×1280** |
39
+ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
40
+ | [<img src="/assets/demo/v1.2/sample_0013.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/7895aab6-ed23-488c-8486-091480c26327) | [<img src="/assets/demo/v1.2/sample_1718.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/20f07c7b-182b-4562-bbee-f1df74c86c9a) | [<img src="/assets/demo/v1.2/sample_0087.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/3d897e0d-dc21-453a-b911-b3bda838acc2) |
41
+ | [<img src="/assets/demo/v1.2/sample_0052.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/644bf938-96ce-44aa-b797-b3c0b513d64c) | [<img src="/assets/demo/v1.2/sample_1719.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/272d88ac-4b4a-484d-a665-8d07431671d0) | [<img src="/assets/demo/v1.2/sample_0002.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/ebbac621-c34e-4bb4-9543-1c34f8989764) |
42
+ | [<img src="/assets/demo/v1.2/sample_0011.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/a1e3a1a3-4abd-45f5-8df2-6cced69da4ca) | [<img src="/assets/demo/v1.2/sample_0004.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/d6ce9c13-28e1-4dff-9644-cc01f5f11926) | [<img src="/assets/demo/v1.2/sample_0061.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/561978f8-f1b0-4f4d-ae7b-45bec9001b4a) |
43
+
44
+ <details>
45
+ <summary>OpenSora 1.1 演示</summary>
46
+
47
+ | **2秒 240×426** | **2秒 240×426** |
48
+ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
49
+ | [<img src="/assets/demo/sample_16x240x426_9.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c31ebc52-de39-4a4e-9b1e-9211d45e05b2) | [<img src="/assets/demo/sora_16x240x426_26.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c31ebc52-de39-4a4e-9b1e-9211d45e05b2) |
50
+ | [<img src="/assets/demo/sora_16x240x426_27.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/f7ce4aaa-528f-40a8-be7a-72e61eaacbbd) | [<img src="/assets/demo/sora_16x240x426_40.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/5d58d71e-1fda-4d90-9ad3-5f2f7b75c6a9) |
51
+
52
+ | **2秒 426×240** | **4秒 480×854** |
53
+ | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
54
+ | [<img src="/assets/demo/sora_16x426x240_24.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/34ecb4a0-4eef-4286-ad4c-8e3a87e5a9fd) | [<img src="/assets/demo/sample_32x480x854_9.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/c1619333-25d7-42ba-a91c-18dbc1870b18) |
55
+
56
+ | **16秒 320×320** | **16秒 224×448** | **2秒 426×240** |
57
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
58
+ | [<img src="/assets/demo/sample_16s_320x320.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/3cab536e-9b43-4b33-8da8-a0f9cf842ff2) | [<img src="/assets/demo/sample_16s_224x448.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/9fb0b9e0-c6f4-4935-b29e-4cac10b373c4) | [<img src="/assets/demo/sora_16x426x240_3.gif" width="">](https://github.com/hpcaitech/Open-Sora-dev/assets/99191637/3e892ad2-9543-4049-b005-643a4c1bf3bf) |
59
+
60
+
61
+ </details>
62
+
63
+ <details>
64
+ <summary>OpenSora 1.0 Demo</summary>
65
+
66
+ | **2秒 512×512** | **2秒 512×512** | **2秒 512×512** |
67
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
68
+ | [<img src="/assets/readme/sample_0.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/de1963d3-b43b-4e68-a670-bb821ebb6f80) | [<img src="/assets/readme/sample_1.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/13f8338f-3d42-4b71-8142-d234fbd746cc) | [<img src="/assets/readme/sample_2.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/fa6a65a6-e32a-4d64-9a9e-eabb0ebb8c16) |
69
+ |森林地区宁静的夜景。 [...] 该视频是一段延时摄影,捕捉了白天到夜晚的转变,湖泊和森林始终作为背景。 | 无人机拍摄的镜头捕捉到了海岸悬崖的壮丽美景,[...] 海水轻轻地拍打着岩石底部和紧贴悬崖顶部的绿色植物。| 瀑布从悬崖上倾泻而下,流入宁静的湖泊,气势磅礴。[...] 摄像机角度提供了瀑布的鸟瞰图。 |
70
+ | [<img src="/assets/readme/sample_3.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/64232f84-1b36-4750-a6c0-3e610fa9aa94) | [<img src="/assets/readme/sample_4.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/983a1965-a374-41a7-a76b-c07941a6c1e9) | [<img src="/assets/readme/sample_5.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/ec10c879-9767-4c31-865f-2e8d6cf11e65) |
71
+ | 夜晚繁华的城市街道,充满了汽车前灯的光芒和路灯的氛围光。 [...] | 向日葵田的生机勃勃,美不胜收。向日葵整齐排列,给人一种秩序感和对称感。 [...] |宁静的水下场景,一只海龟在珊瑚礁中游动。这只海龟的壳呈绿褐色 [...] |
72
+
73
+ 视频经过降采样以.gif用于显示。单击查看原始视频。提示经过修剪以用于显示,请参阅[此处](/assets/texts/t2v_samples.txt)查看完整提示。
74
+
75
+ </details>
76
+
77
+ ## 🔆 新功能/更新
78
+
79
+ * 📍 **Open-Sora 1.2** 发布。模型权重可在[此处](#model-weights)查看。有关更多详细信息,请参阅我们的**[技术报告 v1.2](docs/report_03.md)** 。
80
+ * ✅ 支持整流流调度。
81
+ * ✅ 训练我们的 3D-VAE 进行时间维度压缩。
82
+ * 📍 **Open-Sora 1.1**发布。模型权重可在[此处](#model-weights)获得。它针对**0s~15s、144p 到 720p、各种宽高比**的视频进行训练。有关更多讨论,请参阅我们的**[技术报告 v1.1](/docs/report_02.md)** 。
83
+ * 🔧 **数据处理流程** v1.1发布,提供从原始视频到(文本,视频片段)对的自动处理流程,包括场景剪切$\rightarrow$过滤(美学、光流、OCR 等)$\rightarrow$字幕$\rightarrow$管理。使用此工具,您可以轻松构建视频数据集。
84
+ * ✅ 改进的 ST-DiT 架构包括 rope 位置编码、qk 范数、更长的文本长度等。
85
+ * ✅ 支持任意分辨率、纵横比和时长(包括图像)的训练。
86
+ * ✅ 支持图像和视频调节以及视频编辑,从而支持动画图像,连接视频等。
87
+ * 📍 **Open-Sora 1.0**发布。模型权重可在[此处](#model-weights)获得。仅使用 400K 视频片段和 200 个 H800 天(相比稳定视频扩散中的 152M 样本),我们就能生成 2s 512×512 视频。有关更多讨论,请参阅我们的**[技术报告 v1.0](docs/report_01.md)**。
88
+ * ✅从图像扩散模型到视频扩散模型的三阶段训练。我们为每个阶段提供权重。
89
+ * ✅ 支持训练加速,包括加速 Transformer、更快的 T5 和 VAE 以及序列并行。Open-Sora 在 64x512x512 视频上训练时可将训练速度提高**55%**。详细信息位于[训练加速.md](docs/acceleration.md)。
90
+ * 🔧 **数据预处理流程 v1.0**,包括 [下载](tools/datasets/README.md), [视频剪辑](tools/scene_cut/README.md), 和 [字幕](tools/caption/README.md) 工具. 我们的数据收集计划可在 [数据集.md](docs/datasets.md)中找到.
91
+
92
+ <details>
93
+ <summary>查看更多</summary>
94
+
95
+ ✅ 我们发现[VideoGPT](https://wilson1yan.github.io/videogpt/index.html)的 VQ-VAE质量较低,因此采用了[Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original)中的更好的 VAE 。我们还发现时间维度的修补会降低质量。有关更多讨论,请参阅我们的**[技术报告v1.0](docs/report_01.md)**。
96
+ ✅ 我们研究了不同的架构,包括 DiT、Latte 和我们提出的 **STDiT**。我们的STDiT在质量和速度之间实现了更好的平衡。请参阅我们的 **[技术报告v1.0](docs/report_01.md)**以了解更多讨论。
97
+ ✅ 支持剪辑和T5文本调节。
98
+ ✅ 通过将图像视为单帧视频,我们的项目支持在图像和视频上训练 DiT(例如 ImageNet 和 UCF101)。有关更多说明,请参阅[commands.md](docs/commands.md) 。
99
+ ✅ 支持使用[DiT](https://github.com/facebookresearch/DiT), [Latte](https://github.com/Vchitect/Latte),
100
+ 和 [PixArt](https://pixart-alpha.github.io/).的官方权重进行推理。
101
+ ✅ 重构代码库。查看[structure.md](docs/structure.md)以了解项目结构以及如何使用配置文件。
102
+
103
+ </details>
104
+
105
+ ### 按优先级排序的 TODO 列表
106
+
107
+ <details>
108
+ <summary>查看更多</summary>
109
+
110
+ * [x] 训练视频 VAE 并使我们的模型适应新的 VAE
111
+ * [x] 缩放模型参数和数据集大小
112
+ * [x] 纳入更好的调度程序(整流流程)
113
+ * [x] 评估流程
114
+ * [x] 完成数据处理流程(包括密集光流、美学评分、文本-图像相似度等)。有关更多信息,请参阅[数据集](/docs/datasets.md)
115
+ * [x] 支持图像和视频调节
116
+ * [x] 支持可变的纵横比、分辨率和持续时间
117
+
118
+ </details>
119
+
120
+ ## 内容
121
+
122
+ * [安装](#安装)
123
+ * [模型权重](#模型权重)
124
+ * [Gradio演示](#gradio演示)
125
+ * [推理](#推理)
126
+ * [数据处理](#数据处理)
127
+ * [训练](#训练)
128
+ * [评估](#评估)
129
+ * [贡献](#贡献)
130
+ * [引用](#引用)
131
+ * [致谢](#致谢)
132
+
133
+ 下面列出了其他有用的文档和链接。
134
+
135
+ * 报告: [技术报告 v1.2](docs/report_v3.md), [技术报告 v1.1](/docs/report_v2.md), [技术报告 v1.0](/docs/report_v1.md), [训练加速.md](docs/acceleration.md)
136
+ * Repo 结构: [结构.md](docs/structure.md)
137
+ * 配置文件说明: [config.md](docs/config.md)
138
+ * Useful commands: [commands.md](docs/commands.md)
139
+ * 数据处理管道和数据集: [datasets.md](docs/datasets.md)
140
+ * 每个数据处理工具的 README: [dataset conventions and management](/tools/datasets/README.md), [scene cutting](/tools/scene_cut/README.md), [scoring](/tools/scoring/README.md), [caption](/tools/caption/README.md)
141
+ * 评估: [eval](/eval/README.md)
142
+ * 画廊: [gallery](https://hpcaitech.github.io/Open-Sora/)
143
+
144
+ ## 安装
145
+
146
+ ### 从源头安装
147
+
148
+ 对于 CUDA 12.1,您可以使用以下命令[安装](/docs/installation.md)依赖项。否则,请参阅安装以获取有关不同 cuda 版本的更多说明以及数据预处理的其他依赖项。
149
+
150
+ ```bash
151
+ # create a virtual env and activate (conda as an example)
152
+ conda create -n opensora python=3.9
153
+ conda activate opensora
154
+
155
+ # install torch, torchvision and xformers
156
+ pip install -r requirements/requirements-cu121.txt
157
+
158
+ # download the repo
159
+ git clone https://github.com/hpcaitech/Open-Sora
160
+ cd Open-Sora
161
+
162
+ # the default installation is for inference only
163
+ pip install -v . # for development mode, `pip install -v -e .`
164
+ ```
165
+
166
+ (Optional, recommended for fast speed, especially for training) To enable `layernorm_kernel` and `flash_attn`, you need to install `apex` and `flash-attn` with the following commands.
167
+
168
+ ```bash
169
+ # install flash attention
170
+ # set enable_flash_attn=False in config to disable flash attention
171
+ pip install packaging ninja
172
+ pip install flash-attn --no-build-isolation
173
+
174
+ # install apex
175
+ # set enable_layernorm_kernel=False in config to disable apex
176
+ pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git
177
+ ```
178
+
179
+ ### 使用Docker
180
+
181
+ 运行以下命令从提供的Dockerfile 构建docker 镜像。
182
+
183
+ ```bash
184
+ docker build -t opensora .
185
+ ```
186
+
187
+ 运行以下命令以交互模式启动docker容器。
188
+
189
+ ```bash
190
+ docker run -ti --gpus all -v .:/workspace/Open-Sora opensora
191
+ ```
192
+
193
+ ## 模型权重
194
+
195
+ ### Open-Sora 1.2 模型权重
196
+
+ | 模型 | 模型大小 | 数据 | 迭代次数 | 批次大小 | 网址 |
197
+ | ---------- | ---------- | ---- | ----------- | ---------- | --- |
198
+ | Diffusion | 1.1B | 30M | 70k | 动态大小 | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v3) |
199
+ | VAE | 384M | 3M | 1M | 8 | [:link:](https://huggingface.co/hpcai-tech/OpenSora-VAE-v1.2) |
200
+
201
+ 请参阅我们的**[report 1.2](docs/report_v3.md)**以了解更多信息。
202
+
203
+ ### Open-Sora 1.1 模型权重
204
+
205
+ <details>
206
+ <summary>查看更多</summary>
207
+
208
+ | 分辨率 | 模型大小 | 数据 | 迭代次数 | 批量大小 | 网址 |
209
+ | ------------------ | ---------- | -------------------------- | ----------- | ------------------------------------------------- | -------------------------------------------------------------------- |
210
+ | mainly 144p & 240p | 700M | 10M videos + 2M images | 100k | [dynamic](/configs/opensora-v1-1/train/stage2.py) | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v2-stage2) |
211
+ | 144p to 720p | 700M | 500K HQ videos + 1M images | 4k | [dynamic](/configs/opensora-v1-1/train/stage3.py) | [:link:](https://huggingface.co/hpcai-tech/OpenSora-STDiT-v2-stage3) |
212
+
213
+ 请参阅我们的 **[报告 1.1](docs/report_02.md)** 以了解更多信息。
214
+
215
+ :warning: **局限性**: 此版本包含已知问题,我们将在下一版本中修复这些问题(因为我们为下一版本节省了计算资源)。此外,由于此问题,视频生成可能会长时间失败,高分辨率将产生嘈杂的结果。
216
+
217
+ </details>
218
+
219
+ ### Open-Sora 1.0 模型权重
220
+ <details>
221
+ <summary>查看更多</summary>
222
+
223
+ | 分辨率 | 模型大小 | 数据 | 迭代次数 | 批量大小 | GPU 天数 (H800) | 网址 |
224
+ | ---------- | ---------- | ------ | ----------- | ---------- | --------------- | ---------- |
225
+ | 16×512×512 | 700M | 20K HQ | 20k | 2×64 | 35 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-HQ-16x512x512.pth) |
226
+ | 16×256×256 | 700M | 20K HQ | 24k | 8×64 | 45 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-HQ-16x256x256.pth) |
227
+ | 16×256×256 | 700M | 366K | 80k | 8×64 | 117 | [:link:](https://huggingface.co/hpcai-tech/Open-Sora/blob/main/OpenSora-v1-16x256x256.pth) |
228
+
229
+ 训练流程: 16x256x256 $\rightarrow$ 16x256x256 高清 $\rightarrow$ 16x512x512 高质量.
230
+
231
+ 我们的模型权重部分由 [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha) 初始化,参数数量为 724M。更多信息请参阅 **[技术报告v1.0](docs/report_v1.md)**。数据集相关信息请参阅[数据集文件](docs/datasets.md)。HQ 表示高质量。
232
+
233
+ :warning: **局限性**: 我们的模型是在有限的预算下训练的。质量和文本对齐相对较差。该模型表现不佳,特别是在生成人类时,无法遵循详细的说明。我们正在努力提高质量和文本对齐。
234
+
235
+ </details>
236
+
237
+ ## Gradio演示
238
+
239
+ 🔥 您可以在Hugging Face 上的[🤗 Gradio 应用程序](https://huggingface.co/spaces/hpcai-tech/open-sora)上在线体验Open-Sora。【由于GPU资源不足,已失效】
240
+
241
+ ### 本地部署
242
+
243
+ 如果您想在本地部署 gradio,我们还在这个存储库中提供了一个[Gradio 应用程序](./gradio) ,您可以使用以下命令启动一个交互式 Web 应用程序来体验使用 Open-Sora 生成视频。
244
+
245
+ ```bash
246
+ pip install gradio spaces
247
+ python gradio/app.py
248
+ ```
249
+
250
+ 这将在您的本地主机上启动 Gradio 应用程序。如果您想了解有关 Gradio 应用程序的更多信息,可以参考[Gradio README](./gradio/README.md)。
251
+
252
+ 要启用提示增强和其他语言输入(例如中文输入),您需要OPENAI_API_KEY在环境中进行设置。查看[OpenAI的文档](https://platform.openai.com/docs/quickstart)以获取您的 API 密钥。
253
+
254
+ ```bash
255
+ export OPENAI_API_KEY=YOUR_API_KEY
256
+ ```
257
+
258
+ ### 入门
259
+
260
+ 在 Gradio 应用程序中,基本选项如下:
261
+
262
+ ![Gradio Demo](/assets/readme/gradio_basic.png)
263
+
264
+ 生成视频最简单的方式是输入文本提示,然后点击“**生成视频**”按钮(如果找不到,请向下滚动)。生成的视频将显示在右侧面板中。勾选“**使用 GPT4o 增强提示**”将使用 GPT-4o 来细化提示,而“**随机提示**”按钮将由 GPT-4o 为您生成随机提示。由于 OpenAI 的 API 限制,提示细化结果具有一定的随机性。
265
+
266
+ 然后,你可以选择生成视频的**分辨率**、**时长**、**长宽比**。不同的分辨率和视频长度会影响视频生成速度。在 80G H100 GPU 上,生成速度和峰值内存使用量为:
267
+
268
+ | 分辨率 | 图像 | 2秒 | 4秒 | 8秒 | 16秒 |
269
+ | ---- | ------- | -------- | --------- | --------- | --------- |
270
+ | 360p | 3s, 24G | 18s, 27G | 31s, 27G | 62s, 28G | 121s, 33G |
271
+ | 480p | 2s, 24G | 29s, 31G | 55s, 30G | 108s, 32G | 219s, 36G |
272
+ | 720p | 6s, 27G | 68s, 41G | 130s, 39G | 260s, 45G | 547s, 67G |
273
+
274
+ 注意,除了文本转视频,你还可以使用图片转视频。你可以上传图片,然后点击“**生成视频**”按钮,生成以图片为第一帧的视频。或者,你可以填写文本提示,然后点击“**生成图片**”按钮,根据文本提示生成图片,然后点击“**生成视频**”按钮,根据同一模型生成的图片生成视频。
275
+
276
+ ![Gradio Demo](/assets/readme/gradio_option.png)
277
+
278
+ 然后您可以指定更多选项,包括"**运动强度**"、"**美学**"和"**相机运动**"。如果未选中"启用"或选择"无",则不会将信息传递给模型。否则,模型将生成具有指定运动强度、美学分数和相机运动的视频。
279
+
280
+ 对于**美学分数**,我们建议使用高于 6 的值。对于**运动强度**,较小的值将导致更平滑但动态性较差的视频,而较大的值将导致更动态但可能更模糊的视频。因此,您可以尝试不使用它,然后根据生成的视频进行调整。对于**相机运动**,有时模型无法很好地遵循指令,我们正在努力改进它。
281
+
282
+ 您还可以调整“**采样步数**”,这是去噪的次数,与生成速度直接相关。小于 30 的数字通常会导致较差的生成结果,而大于 100 的数字通常不会有明显的改善。“种子”用于可重复性,您可以将其设置为固定数字以生成相同的视频。“**CFG 比例**”控制模型遵循文本提示的程度,较小的值会导致视频更随机,而较大的值会导致视频更遵循文本(建议为 7)。
283
+
284
+ 对于更高级的用法,您可以参考[Gradio README](./gradio/README.md#advanced-usage).
285
+
286
+ ## 推理
287
+
288
+ ### Open-Sora 1.2 命令行推理
289
+
290
+ 基础的命令行推理:
291
+
292
+ ```bash
293
+ # text to video
294
+ python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
295
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
296
+ --prompt "a beautiful waterfall"
297
+ ```
298
+
299
+ 您可以向命令行添加更多选项来定制生成。
300
+
301
+ ```bash
302
+ python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
303
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
304
+ --num-sampling-steps 30 --flow 5 --aes 6.5 \
305
+ --prompt "a beautiful waterfall"
306
+ ```
307
+
308
+ 对于图像到视频生成和其他功能,API 与 Open-Sora 1.1 兼容。请参阅[此处](commands.md)了解更多说明。
309
+
310
+ 如果您的安装不包含 `apex` 和 `flash-attn`, 则需要在配置文件中或通过以下命令禁用它们。
311
+
312
+ ```bash
313
+ python scripts/inference.py configs/opensora-v1-2/inference/sample.py \
314
+ --num-frames 4s --resolution 720p \
315
+ --layernorm-kernel False --flash-attn False \
316
+ --prompt "a beautiful waterfall"
317
+ ```
318
+
319
+ ### 序列并行推理
320
+
321
+ 要启用序列并行,您需要使用 `torchrun` 来运行推理脚本。以下命令将使用 2 个 GPU 运行推理。
322
+
323
+ ```bash
324
+ # text to video
325
+ CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc_per_node 2 scripts/inference.py configs/opensora-v1-2/inference/sample.py \
326
+ --num-frames 4s --resolution 720p --aspect-ratio 9:16 \
327
+ --prompt "a beautiful waterfall"
328
+ ```
329
+
330
+ :warning: **注意**: gradio 部署不支持序列并行。目前,只有当序列维度可以被 GPU 数量整除时才支持序列并行,因此在某些情况下可能会失败。我们测试了 4 个 GPU 用于 720p 和 2 个 GPU 用于 480p。
331
+
332
+
333
+ ### GPT-4o 快速细化
334
+
335
+ 我们发现 GPT-4o 可以细化提示并提高生成视频的质量。利用此功能,您还可以使用其他语言(例如中文)作为提示。要启用此功能,您需要在环境中准备您的 openai api 密钥:
336
+
337
+ ```bash
338
+ export OPENAI_API_KEY=YOUR_API_KEY
339
+ ```
340
+
341
+ 然后您可以用 `--llm-refine True` 启用GPT-4o进行提示细化以完成推理。
342
+
343
+ ### Open-Sora 1.1 命令行推理
344
+ <details>
345
+ <summary>查看更多</summary>
346
+
347
+ 由于 Open-Sora 1.1 支持动态输入大小的推理,因此您可以将输入大小作为参数传递。
348
+
349
+ ```bash
350
+ # text to video
351
+ python scripts/inference.py configs/opensora-v1-1/inference/sample.py --prompt "A beautiful sunset over the city" --num-frames 32 --image-size 480 854
352
+ ```
353
+
354
+ 如果您的安装不包含`apex` 和 `flash-attn`,则需要在配置文件中或通过以下命令禁用它们。
355
+
356
+ ```bash
357
+ python scripts/inference.py configs/opensora-v1-1/inference/sample.py --prompt "A beautiful sunset over the city" --num-frames 32 --image-size 480 854 --layernorm-kernel False --flash-attn False
358
+ ```
359
+
360
+ 请参阅[此处](docs/commands.md#inference-with-open-sora-11)了解更多说明,包括文本转图像、图像转视频、视频转视频和无限时间生成。
361
+
362
+ </details>
363
+
364
+ ### Open-Sora 1.0 命令行推理
365
+
366
+ <details>
367
+ <summary>查看更多</summary>
368
+
369
+ 我们还提供了离线推理脚本。运行以下命令生成样本,所需的模型权重将自动下载。要更改采样提示,请修改传递给的 txt 文件--prompt-path。请参阅[此处](docs/structure.md#inference-config-demos)以自定义配置。
370
+
371
+ ```bash
372
+ # Sample 16x512x512 (20s/sample, 100 time steps, 24 GB memory)
373
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path OpenSora-v1-HQ-16x512x512.pth --prompt-path ./assets/texts/t2v_samples.txt
374
+
375
+ # Sample 16x256x256 (5s/sample, 100 time steps, 22 GB memory)
376
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path OpenSora-v1-HQ-16x256x256.pth --prompt-path ./assets/texts/t2v_samples.txt
377
+
378
+ # Sample 64x512x512 (40s/sample, 100 time steps)
379
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
380
+
381
+ # Sample 64x512x512 with sequence parallelism (30s/sample, 100 time steps)
382
+ # sequence parallelism is enabled automatically when nproc_per_node is larger than 1
383
+ torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
384
+ ```
385
+
386
+ 速度是在 H800 GPU 上测试的。有关使用其他型号进行推理,请参阅[此处](docs/commands.md) 了解更多说明。要降低内存使用量,请`vae.micro_batch_size`在配置中设置较小的值(略低采样速度)。
387
+
388
+ </details>
389
+
390
+ ## 数据处理
391
+
392
+ 高质量的数据对于训练良好的生成模型至关重要。为此,我们建立了完整的数据处理流程,可以将原始视频无缝转换为高质量的视频-文本对。流程如下所示。有关详细信息,请参阅[数据处理](docs/data_processing.md)。另请查看我们使用的[数据集](docs/datasets.md)。
393
+
394
+ ![Data Processing Pipeline](/assets/readme/report_data_pipeline.png)
395
+
396
+ ## 训练
397
+
398
+ ### Open-Sora 1.2 训练
399
+
400
+ 训练过程与Open-Sora 1.1相同。
401
+
402
+ ```bash
403
+ # one node
404
+ torchrun --standalone --nproc_per_node 8 scripts/train.py \
405
+ configs/opensora-v1-2/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
406
+ # multiple nodes
407
+ colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py \
408
+ configs/opensora-v1-2/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
409
+ ```
410
+
411
+ ### Open-Sora 1.1 训练
412
+
413
+ <details>
414
+ <summary>查看更多</summary>
415
+
416
+ 在文件中准备好数据后`csv`,运行以下命令在单个节点上启动训练。
417
+
418
+ ```bash
419
+ # one node
420
+ torchrun --standalone --nproc_per_node 8 scripts/train.py \
421
+ configs/opensora-v1-1/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
422
+ # multiple nodes
423
+ colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py \
424
+ configs/opensora-v1-1/train/stage1.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
425
+ ```
426
+
427
+ </details>
428
+
429
+ ### Open-Sora 1.0 训练
430
+
431
+ <details>
432
+ <summary>查看更多</summary>
433
+
434
+ 在文件中准备好数据后`csv`,运行以下命令在单个节点上启动训练。
435
+
436
+ ```bash
437
+ # 1 GPU, 16x256x256
438
+ torchrun --nnodes=1 --nproc_per_node=1 scripts/train.py configs/opensora/train/16x256x256.py --data-path YOUR_CSV_PATH
439
+ # 8 GPUs, 64x512x512
440
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
441
+ ```
442
+
443
+ 要在多个节点上启动训练,请根据[ColossalAI](https://colossalai.org/docs/basics/launch_colossalai/#launch-with-colossal-ai-cli)准备一个主机文件,并运行以下命令。
444
+
445
+ ```bash
446
+ colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
447
+ ```
448
+ 有关训练其他模型和高级用法,请参阅[此处](docs/commands.md)获取更多说明。
449
+
450
+ </details>
451
+
452
+ ## 评估
453
+
454
+ 我们支持基于以下方面的评估:
455
+
456
+ - 验证损失
457
+ - [VBench](https://github.com/Vchitect/VBench/tree/master) 分数
458
+ - VBench-i2v 分数
459
+ - 批量生成以供人工评估
460
+
+ 所有评估代码均发布在 `eval` 文件夹中。查看[README](/eval/README.md)了解更多详细信息。我们的 [技术报告](report_v3.md#评估)还提供了有关训练期间评估的更多信息。下表显示 Open-Sora 1.2 大大改进了 Open-Sora 1.0。
461
+
462
+ | 模型 | 总得分 | 质量得分 | 语义得分 |
463
+ | -------------- | ----------- | ------------- | -------------- |
464
+ | Open-Sora V1.0 | 75.91% | 78.81% | 64.28% |
465
+ | Open-Sora V1.2 | 79.23% | 80.71% | 73.30% |
466
+
467
+ ## VAE 训练与评估
468
+
469
+ 我们训练一个由空间 VAE 和时间 VAE 组成的 VAE 管道。有关更多详细信息,请参阅[VAE 文档](vae.md)。在运行以下命令之前,请按照我们的[安装文档](installation.md)安装 VAE 和评估所需的依赖项。
470
+
471
+ 如果您想训练自己的 VAE,我们需要按照[数据处理](#data-processing)流程在 csv 中准备数据,然后运行以下命令。请注意,您需要根据自己的 csv 数据大小相应地调整配置文件中的训练`epochs`数量。
472
+
473
+
474
+ ```bash
475
+ # stage 1 training, 380k steps, 8 GPUs
476
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage1.py --data-path YOUR_CSV_PATH
477
+ # stage 2 training, 260k steps, 8 GPUs
478
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage2.py --data-path YOUR_CSV_PATH
479
+ # stage 3 training, 540k steps, 24 GPUs
480
+ torchrun --nnodes=3 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage3.py --data-path YOUR_CSV_PATH
481
+ ```
482
+
483
+ 为了评估 VAE 的性能,您需要首先运行 VAE 推理来生成视频,然后计算生成的视频的分数:
484
+
485
+ ```bash
486
+ # video generation
487
+ torchrun --standalone --nnodes=1 --nproc_per_node=1 scripts/inference_vae.py configs/vae/inference/video.py --ckpt-path YOUR_VAE_CKPT_PATH --data-path YOUR_CSV_PATH --save-dir YOUR_VIDEO_DIR
488
+ # the original videos will be saved to `YOUR_VIDEO_DIR_ori`
489
+ # the reconstructed videos through the pipeline will be saved to `YOUR_VIDEO_DIR_rec`
490
+ # the reconstructed videos through the spatial VAE only will be saved to `YOUR_VIDEO_DIR_spatial`
491
+
492
+ # score calculation
493
+ python eval/vae/eval_common_metric.py --batch_size 2 --real_video_dir YOUR_VIDEO_DIR_ori --generated_video_dir YOUR_VIDEO_DIR_rec --device cuda --sample_fps 24 --crop_size 256 --resolution 256 --num_frames 17 --sample_rate 1 --metric ssim psnr lpips flolpips
494
+ ```
495
+
496
+
497
+ ## 贡献
498
+
499
+ 感谢以下出色的贡献者:
500
+
501
+ <a href="https://github.com/hpcaitech/Open-Sora/graphs/contributors">
502
+ <img src="https://contrib.rocks/image?repo=hpcaitech/Open-Sora" />
503
+ </a>
504
+
505
+ 如果您希望为该项目做出贡献,请参阅[Contribution Guideline](./CONTRIBUTING.md)。
506
+
507
+ ## 致谢
508
+
509
+ 这里我们仅列出了部分项目,其他研究成果及数据集请参考我们的报告。
510
+
511
+ * [ColossalAI](https://github.com/hpcaitech/ColossalAI): 强大的大型模型并行加速与优化系统。
512
+ * [DiT](https://github.com/facebookresearch/DiT): 带有 Transformer 的可扩展扩散模型。
513
+ * [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT): DiT 训练的加速器。我们从 OpenDiT 中采用了有价值的训练进度加速策略。
514
+ * [PixArt](https://github.com/PixArt-alpha/PixArt-alpha): 一个基于 DiT 的开源文本转图像模型。
515
+ * [Latte](https://github.com/Vchitect/Latte): 尝试高效地训练视频的 DiT。
516
+ * [StabilityAI VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse-original): 一个强大的图像 VAE 模型。
517
+ * [CLIP](https://github.com/openai/CLIP): 一个强大的文本图像嵌入模型。
518
+ * [T5](https://github.com/google-research/text-to-text-transfer-transformer): 强大的文本编码器。
519
+ * [LLaVA](https://github.com/haotian-liu/LLaVA): 基于[Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) 和 [Yi-34B](https://huggingface.co/01-ai/Yi-34B). 的强大图像字幕模型。
520
+ * [PLLaVA](https://github.com/magic-research/PLLaVA): 一个强大的视频字幕模型。
521
+ * [MiraData](https://github.com/mira-space/MiraData):具有长持续时间和结构化字幕的大规模视频数据集。
522
+
523
+ 我们感谢他们的出色工作和对开源的慷慨贡献。
524
+
525
+ ## 引用
526
+
527
+ ```bibtex
528
+ @software{opensora,
529
+ author = {Zangwei Zheng and Xiangyu Peng and Tianji Yang and Chenhui Shen and Shenggui Li and Hongxin Liu and Yukun Zhou and Tianyi Li and Yang You},
530
+ title = {Open-Sora: Democratizing Efficient Video Production for All},
531
+ month = {March},
532
+ year = {2024},
533
+ url = {https://github.com/hpcaitech/Open-Sora}
534
+ }
535
+ ```
536
+
537
+ ## Star增长
538
+
539
+ [![Star History Chart](https://api.star-history.com/svg?repos=hpcaitech/Open-Sora&type=Date)](https://star-history.com/#hpcaitech/Open-Sora&Date)
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/READMEv1.1.md ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <p align="center">
2
+ <img src="../../assets/readme/icon.png" width="250"/>
3
+ <p>
4
+
5
+ <div align="center">
6
+ <a href="https://github.com/hpcaitech/Open-Sora/stargazers"><img src="https://img.shields.io/github/stars/hpcaitech/Open-Sora?style=social"></a>
7
+ <a href="https://hpcaitech.github.io/Open-Sora/"><img src="https://img.shields.io/badge/Gallery-View-orange?logo=&amp"></a>
8
+ <a href="https://discord.gg/shpbperhGs"><img src="https://img.shields.io/badge/Discord-join-blueviolet?logo=discord&amp"></a>
9
+ <a href="https://join.slack.com/t/colossalaiworkspace/shared_invite/zt-247ipg9fk-KRRYmUl~u2ll2637WRURVA"><img src="https://img.shields.io/badge/Slack-ColossalAI-blueviolet?logo=slack&amp"></a>
10
+ <a href="https://twitter.com/yangyou1991/status/1769411544083996787?s=61&t=jT0Dsx2d-MS5vS9rNM5e5g"><img src="https://img.shields.io/badge/Twitter-Discuss-blue?logo=twitter&amp"></a>
11
+ <a href="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/WeChat.png"><img src="https://img.shields.io/badge/微信-小助手加群-green?logo=wechat&amp"></a>
12
+ <a href="https://hpc-ai.com/blog/open-sora-v1.0"><img src="https://img.shields.io/badge/Open_Sora-Blog-blue"></a>
13
+ </div>
14
+
15
+ ## Open-Sora: 完全开源的高效复现类Sora视频生成方案
16
+ **Open-Sora**项目是一项致力于**高效**制作高质量视频,并使所有人都能使用其模型、工具和内容的计划。
17
+ 通过采用**开源**原则,Open-Sora 不仅实现了先进视频生成技术的低成本普及,还提供了一个精简且用户友好的方案,简化了视频制作的复杂性。
18
+ 通过 Open-Sora,我们希望更多开发者一起探索内容创作领域的创新、创造和包容。
19
+
20
+ [[English Document]](/README.md)
21
+
22
+ <h4>Open-Sora 项目目前处在早期阶段,并将持续更新。</h4>
23
+
24
+ ## 📰 资讯
25
+ > 由于文档需要进行翻译,最新资讯请看[英文文档](/README.md#-news)
26
+ * **[2024.04.25]** 🤗 我们在Hugging Face Spaces上发布了Open-Sora的[Gradio demo](https://huggingface.co/spaces/hpcai-tech/open-sora)。
27
+ * **[2024.04.25]** 🔥 我们发布了支持**2秒至15秒、144p至720p、任意宽高比**的文本到图像、文本到视频、图像到视频、视频到视频、无限时间生成的**Open-Sora 1.1**版本。此外,还发布了一个完整的视频处理流程。 [[checkpoints]]() [[report]](/docs/report_02.md)
28
+ * **[2024.03.18]** 🔥 我们发布了**Open-Sora 1.0**,这是一个完全开源的视频生成项目。
29
+ * Open-Sora 1.0 支持视频数据预处理、加速训练、推理等全套流程。
30
+ * 我们提供的[模型权重](#模型权重)只需 3 天的训练就能生成 2 秒的 512x512 视频。
31
+ * **[2024.03.04]** Open-Sora:开源Sora复现方案,成本降低46%,序列扩充至近百万。[[英文博客]](https://hpc-ai.com/blog/open-sora)
32
+
33
+ ## 🎥 最新视频
34
+
35
+ | **2s 512×512** | **2s 512×512** | **2s 512×512** |
36
+ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
37
+ | [<img src="/assets/readme/sample_0.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/de1963d3-b43b-4e68-a670-bb821ebb6f80) | [<img src="/assets/readme/sample_1.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/13f8338f-3d42-4b71-8142-d234fbd746cc) | [<img src="/assets/readme/sample_2.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/fa6a65a6-e32a-4d64-9a9e-eabb0ebb8c16) |
38
+ | A serene night scene in a forested area. [...] The video is a time-lapse, capturing the transition from day to night, with the lake and forest serving as a constant backdrop. | A soaring drone footage captures the majestic beauty of a coastal cliff, [...] The water gently laps at the rock base and the greenery that clings to the top of the cliff. | The majestic beauty of a waterfall cascading down a cliff into a serene lake. [...] The camera angle provides a bird's eye view of the waterfall. |
39
+ | [<img src="/assets/readme/sample_3.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/64232f84-1b36-4750-a6c0-3e610fa9aa94) | [<img src="/assets/readme/sample_4.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/983a1965-a374-41a7-a76b-c07941a6c1e9) | [<img src="/assets/readme/sample_5.gif" width="">](https://github.com/hpcaitech/Open-Sora/assets/99191637/ec10c879-9767-4c31-865f-2e8d6cf11e65) |
40
+ | A bustling city street at night, filled with the glow of car headlights and the ambient light of streetlights. [...] | The vibrant beauty of a sunflower field. The sunflowers are arranged in neat rows, creating a sense of order and symmetry. [...] | A serene underwater scene featuring a sea turtle swimming through a coral reef. The turtle, with its greenish-brown shell [...] |
41
+
42
+ 视频经过降采样处理为`.gif`格式,以便显示。点击查看原始视频。为便于显示,文字经过修剪,全文请参见 [此处](/assets/texts/t2v_samples.txt)。在我们的[图片库](https://hpcaitech.github.io/Open-Sora/)中查看更多样本。
43
+
44
+ ## 🔆 新功能
45
+ > 由于文档需要进行翻译,最新资讯请看[英文文档](/README.md#-new-featuresupdates)
46
+ * 📍Open-Sora-v1 已发布。[这里](#模型权重)提供了模型权重。只需 400K 视频片段和 200 个 H800 GPU 天(对比 Stable Video Diffusion 的 152M 样本),我们就能生成 2 秒的 512×512 视频。
47
+ * ✅ 从图像扩散模型到视频扩散模型的三阶段训练。我们提供每个阶段的权重。
48
+ * ✅ 支持训练加速,包括Transformer加速、更快的 T5 和 VAE 以及序列并行。在对 64x512x512 视频进行训练时,Open-Sora 可将训练速度提高**55%**。详细信息请参见[训练加速](acceleration.md)。
49
+ * 🔧 我们提供用于数据预处理的视频切割和字幕工具。有关说明请点击[此处](tools/data/README.md),我们的数据收集计划请点击 [数据集](datasets.md)。
50
+ * ✅ 我们发现来自[VideoGPT](https://wilson1yan.github.io/videogpt/index.html)的 VQ-VAE 质量较低,因此采用了来自[Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original) 的高质量 VAE。我们还发现在时间维度上进行修补(patch)会降低生成质量。更多讨论,请参阅我们的 **[报告](docs/report_v1.md)**。
51
+ * ✅ 我们研究了不同的架构,包括 DiT、Latte 和我们提出的 **STDiT**。我们的STDiT在质量和速度之间实现了更好的权衡。更多讨论,请参阅我们的 **[报告](report_v1.md)**。
52
+ * ✅ 支持剪辑和 T5 文本调节。
53
+ * ✅ 通过将图像视为单帧视频,我们的项目支持在图像和视频(如 ImageNet 和 UCF101)上训练 DiT。更多说明请参见 [指令解析](commands.md)。
54
+ * ✅ 利用[DiT](https://github.com/facebookresearch/DiT)、[Latte](https://github.com/Vchitect/Latte) 和 [PixArt](https://pixart-alpha.github.io/) 的官方权重支持推理。
55
+
56
+ <details>
57
+ <summary>查看更多</summary>
58
+
59
+ * ✅ 重构代码库。请参阅[结构](structure.md),了解项目结构以及如何使用配置文件。
60
+
61
+ </details>
62
+
63
+ ### 下一步计划【按优先级排序】
64
+
65
+ * [ ] 训练视频-VAE并让模型适应新的VAE **[项目进行中]**
66
+ * [ ] 缩放模型参数和数据集大小 **[项目进行中]**
67
+ * [ ] 纳入更好的时间表,例如 SD3 中的修正流程。 **[项目进行中]**
68
+
69
+ <details>
70
+ <summary>查看更多</summary>
71
+
72
+ * [x] 评估流程。
73
+ * [x] 完成数据处理流程(包括密集光流、美学评分、文本图像相似性、重复数据删除等)。更多信息请参见[数据集](datasets.md)
74
+ * [x] 支持图像和视频调节。
75
+ * [x] 支持可变长宽比、分辨率和持续时间。
76
+
77
+ </details>
78
+
79
+ ## 目录
80
+
81
+ * [安装](#安装)
82
+ * [模型权重](#模型权重)
83
+ * [推理](#推理)
84
+ * [数据处理](#数据处理)
85
+ * [训练](#训练)
86
+ * [评估](#评估)
87
+ * [贡献](#贡献)
88
+ * [声明](#声明)
89
+ * [引用](#引用)
90
+
91
+ ## 安装
92
+
93
+ ### 从源码安装
94
+ ```bash
95
+ # create a virtual env
96
+ conda create -n opensora python=3.10
97
+
98
+ # install torch
99
+ # the command below is for CUDA 12.1, choose install commands from
100
+ # https://pytorch.org/get-started/locally/ based on your own CUDA version
101
+ pip3 install torch torchvision
102
+
103
+ # install flash attention (optional)
104
+ pip install packaging ninja
105
+ pip install flash-attn --no-build-isolation
106
+
107
+ # install apex (optional)
108
+ pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git
109
+
110
+ # install xformers
111
+ pip3 install -U xformers --index-url https://download.pytorch.org/whl/cu121
112
+
113
+ # install this project
114
+ git clone https://github.com/hpcaitech/Open-Sora
115
+ cd Open-Sora
116
+ pip install -v .
117
+ ```
118
+
119
+ ### 使用Docker镜像
120
+
121
+ 运行如下指令使用提供的Dockerfile构建镜像:
122
+
123
+ ```bash
124
+ docker build -t opensora ./docker
125
+ ```
126
+
127
+ 运行以下命令以启动交互模式下的 Docker 容器:
128
+
129
+ ```bash
130
+ docker run -ti --gpus all -v {MOUNT_DIR}:/data opensora
131
+ ```
132
+
133
+ 安装完成后,建议阅读[结构](structure.md),了解项目结构以及如何使用配置文件。
134
+
135
+ ## 模型权重
136
+
137
+ | 分辨率 | 数据 | 迭代次数 | 批量大小 | GPU 天数 (H800) | 网址 |
138
+ | ---------- | ------ | ----------- | ---------- | --------------- | ---------- |
139
+ | 16×256×256 | 366K | 80k | 8×64 | 117 | [:link:]() |
140
+ | 16×256×256 | 20K HQ | 24k | 8×64 | 45 | [:link:]() |
141
+ | 16×512×512 | 20K HQ | 20k | 2×64 | 35 | [:link:]() |
142
+
143
+ 我们模型的权重部分由[PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha) 初始化。参数数量为 724M。有关训练的更多信息,请参阅我们的 **[报告](report_v1.md)**。有关数据集的更多信息,请参阅[数据](datasets.md)。HQ 表示高质量。
144
+ :warning: **局限性**:我们的模型是在有限的预算内训练出来的。质量和文本对齐度相对较差。特别是在生成人类时,模型表现很差,无法遵循详细的指令。我们正在努力改进质量和文本对齐。
145
+
146
+ ## 推理
147
+
148
+ 要使用我们提供的权重进行推理,首先要将[T5](https://huggingface.co/DeepFloyd/t5-v1_1-xxl/tree/main)权重下载到pretrained_models/t5_ckpts/t5-v1_1-xxl 中。然后下载模型权重。运行以下命令生成样本。请参阅[此处](structure.md#推理配置演示)自定义配置。
149
+
150
+ ```bash
151
+ # Sample 16x512x512 (20s/sample, 100 time steps, 24 GB memory)
152
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path OpenSora-v1-HQ-16x512x512.pth --prompt-path ./assets/texts/t2v_samples.txt
153
+
154
+ # Sample 16x256x256 (5s/sample, 100 time steps, 22 GB memory)
155
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path OpenSora-v1-HQ-16x256x256.pth --prompt-path ./assets/texts/t2v_samples.txt
156
+
157
+ # Sample 64x512x512 (40s/sample, 100 time steps)
158
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
159
+
160
+ # Sample 64x512x512 with sequence parallelism (30s/sample, 100 time steps)
161
+ # sequence parallelism is enabled automatically when nproc_per_node is larger than 1
162
+ torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth --prompt-path ./assets/texts/t2v_samples.txt
163
+
164
+ ```
165
+
166
+ 我们在 H800 GPU 上进行了速度测试。如需使用其他模型进行推理,请参阅[此处](commands.md)获取更多说明。减小`vae.micro_batch_size`来降低显存使用(但取样速度会略微减慢)。
167
+
168
+ ## 数据处理
169
+
170
+ 高质量数据是高质量模型的关键。[这里](datasets.md)有我们使用过的数据集和数据收集计划。我们提供处理视频数据的工具。目前,我们的数据处理流程包括以下步骤:
171
+
172
+ 1. 下载数据集。[[文件](/tools/datasets/README.md)]
173
+ 2. 将视频分割成片段。 [[文件](/tools/scene_cut/README.md)]
174
+ 3. 生成视频字幕。 [[文件](/tools/caption/README.md)]
175
+
176
+ ## 训练
177
+
178
+ ### Open-Sora 1.0 训练
179
+ <details>
180
+ <summary>查看更多</summary>
181
+
182
+ 要启动训练,首先要将[T5](https://huggingface.co/DeepFloyd/t5-v1_1-xxl/tree/main)权重下载到pretrained_models/t5_ckpts/t5-v1_1-xxl 中。然后运行以下命令在单个节点上启动训练。
183
+
184
+ ```bash
185
+ # 1 GPU, 16x256x256
186
+ torchrun --nnodes=1 --nproc_per_node=1 scripts/train.py configs/opensora/train/16x256x256.py --data-path YOUR_CSV_PATH
187
+ # 8 GPUs, 64x512x512
188
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
189
+ ```
190
+
191
+ 要在多个节点上启动训练,请根据[ColossalAI](https://colossalai.org/docs/basics/launch_colossalai/#launch-with-colossal-ai-cli) 准备一个主机文件,并运行以下命令。
192
+
193
+ ```bash
194
+ colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT
195
+ ```
196
+
197
+ 有关其他模型的训练和高级使用方法,请参阅[此处](commands.md)获取更多说明。
198
+
199
+ </details>
200
+
201
+ ## 评估
202
+
203
+ 点击[这里](https://github.com/hpcaitech/Open-Sora/blob/main/eval/README.md)查看评估
204
+
205
+ ## 贡献
206
+
207
+ 本中文翻译还有许多不足,如果您希望为该项目做出贡献,可以参考 [贡献指南](/CONTRIBUTING.md).
208
+
209
+ 目前需要翻译或更新的文件:
210
+ * [ ] 更新[资讯](#-资讯)
211
+ * [ ] 更新[最新视频](#-最新视频)
212
+ * [ ] 更新[新功能](#-新功能)。
213
+ * [ ] 翻译[评估](https://github.com/hpcaitech/Open-Sora/blob/main/eval/README.md)文件
214
+ * [ ] 更新Open-Sora 1.1[训练](#训练)
215
+ ## 声明
216
+
217
+ * [ColossalAI](https://github.com/hpcaitech/ColossalAI): A powerful large model parallel acceleration and optimization
218
+ * [DiT](https://github.com/facebookresearch/DiT): Scalable Diffusion Models with Transformers.
219
+ * [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT): An acceleration for DiT training. We adopt valuable acceleration strategies for training progress from OpenDiT.
220
+ * [PixArt](https://github.com/PixArt-alpha/PixArt-alpha): An open-source DiT-based text-to-image model.
221
+ * [Latte](https://github.com/Vchitect/Latte): An attempt to efficiently train DiT for video.
222
+ * [StabilityAI VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse-original): A powerful image VAE model.
223
+ * [CLIP](https://github.com/openai/CLIP): A powerful text-image embedding model.
224
+ * [T5](https://github.com/google-research/text-to-text-transfer-transformer): A powerful text encoder.
225
+ * [LLaVA](https://github.com/haotian-liu/LLaVA): A powerful image captioning model based on [Yi-34B](https://huggingface.co/01-ai/Yi-34B).
226
+
227
+ 我们对他们的出色工作和对开源的慷慨贡献表示感谢。
228
+
229
+ ## 引用
230
+
231
+ ```bibtex
232
+ @software{opensora,
233
+ author = {Zangwei Zheng and Xiangyu Peng and Yang You},
234
+ title = {Open-Sora: Democratizing Efficient Video Production for All},
235
+ month = {March},
236
+ year = {2024},
237
+ url = {https://github.com/hpcaitech/Open-Sora}
238
+ }
239
+ ```
240
+
241
+ [Zangwei Zheng](https://github.com/zhengzangw) and [Xiangyu Peng](https://github.com/xyupeng) equally contributed to this work during their internship at [HPC-AI Tech](https://hpc-ai.com/).
242
+
243
+ ## Star 走势
244
+
245
+ [![Star History Chart](https://api.star-history.com/svg?repos=hpcaitech/Open-Sora&type=Date)](https://star-history.com/#hpcaitech/Open-Sora&Date)
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/acceleration.md ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 加速
2
+
3
+ >本文档对应于Open-Sora v1.1版本。
4
+
5
+ Open-Sora 旨在为扩散模型提供一个高速训练框架。在 64 帧 512x512 视频上训练时,我们可以实现 **55%** 的训练速度加速。我们的框架支持训练
6
+ **1分钟1080p视频**。
7
+
8
+ ## 加速的 Transformer
9
+
10
+ Open-Sora 通过以下方式提高训练速度:
11
+
12
+ - 内核优化,包括 [flash attention](https://github.com/Dao-AILab/flash-attention), 融合 layernorm 内核以及由 colossalAI
13
+ 编译的内核。
14
+ - 混合并行性,包括 ZeRO。
15
+ - 用于更大批量的梯度检查点。
16
+
17
+ 我们在图像上的训练速度可与 [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT) 相媲美,这是一个加速 DiT
18
+ 训练的项目。训练速度是在批处理大小为 128、图像大小为 256x256 的 8 个 H800 GPU 上测量的。
19
+
20
+ | 模型 | 吞吐量 (img/s/GPU) | 吞吐量 (tokens/s/GPU) |
21
+ |----------|-----------------|--------------------|
22
+ | DiT | 100 | 26k |
23
+ | OpenDiT | 175 | 45k |
24
+ | OpenSora | 175 | 45k |
25
+
26
+ ## 高效的 STDiT
27
+
28
+ 我们的 STDiT 采用时空注意力对视频数据进行建模。与在 DiT 中直接使用全注意力相比,我们的 STDiT 随着帧数的增加而更高效。我们当前的框架仅对超长序列支持序列并行。
29
+
30
+ 训练速度是在 8 个 H800 GPU 上测量的,应用了加速技术,GC 表示梯度检查点。
31
+ 两者都具有像 PixArt 一样的 T5 调节。
32
+
33
+ | 模型 | 设置 | 吞吐量 (sample/s/GPU) | 吞吐量 (tokens/s/GPU) |
34
+ |------------------|----------------|--------------------|--------------------|
35
+ | DiT | 16x256 (4k) | 7.20 | 29k |
36
+ | STDiT | 16x256 (4k) | 7.00 | 28k |
37
+ | DiT | 16x512 (16k) | 0.85 | 14k |
38
+ | STDiT | 16x512 (16k) | 1.45 | 23k |
39
+ | DiT (GC) | 64x512 (65k) | 0.08 | 5k |
40
+ | STDiT (GC) | 64x512 (65k) | 0.40 | 25k |
41
+ | STDiT (GC, sp=2) | 360x512 (370k) | 0.10 | 18k |
42
+
43
+ 使用 Video-VAE 在时间维度上进行 4 倍下采样时,24fps 视频有 450 帧。STDiT(28k tokens/s)与处理图像的 DiT(高达 45k tokens/s)
44
+ 之间的速度差距主要来自 T5 和 VAE 编码,以及时间注意力。
45
+
46
+ ## 加速的编码器 (T5, VAE)
47
+
48
+ 在训练过程中,文本由 T5 编码,视频由 VAE 编码。通常有两种方法可以加速训练:
49
+
50
+ 1. 提前预处理文本和视频数据并保存到磁盘。
51
+ 2. 在训练过程中对文本和视频数据进行编码,并加快编码过程。
52
+
53
+ 对于选项 1,一个样本的 120 个文本 token 约需 1M 磁盘空间,而 64x64x64 的潜变量约需 4M。若训练一个包含 10M 视频剪辑的数据集,所需的总磁盘空间约为
54
+ 50TB。我们的存储系统目前还无法承载这种数据规模。
55
+
56
+ 对于选项 2,我们优化了 T5 的速度和内存占用。参照 [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT) 的做法,我们发现 VAE
57
+ 消耗了大量的 GPU 内存。因此,我们在进行
58
+ VAE 编码时把批次拆分成更小的微批次。结合这两种技术,我们可以大大加快训练速度。
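+
+ 下面给出一个最小示意(并非仓库中的实际实现;`vae.encode` 的接口与 `micro_batch_size` 的取值均为假设),说明如何把一个大批次拆成更小的微批次分别送入 VAE 编码,以降低显存峰值:
+
+ ```python
+ import torch
+
+ def encode_in_micro_batches(vae, videos: torch.Tensor, micro_batch_size: int = 4) -> torch.Tensor:
+     """把大批次按 micro_batch_size 拆开,逐段用 VAE 编码后再拼接回完整批次。"""
+     latents = []
+     for start in range(0, videos.shape[0], micro_batch_size):
+         chunk = videos[start : start + micro_batch_size]
+         with torch.no_grad():  # VAE 只作为编码器使用,不需要梯度
+             latents.append(vae.encode(chunk))
+     return torch.cat(latents, dim=0)
+ ```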
59
+
60
+ 训练速度是在 8 个带有 STDiT 的 H800 GPU 上测量的。
61
+
62
+ | 加速模式 | 设置 | 吞吐量 (img/s/GPU) | 吞吐量 (tokens/s/GPU) |
63
+ |--------------|---------------|-----------------|--------------------|
64
+ | Baseline | 16x256 (4k) | 6.16 | 25k |
65
+ | w. faster T5 | 16x256 (4k) | 7.00 | 29k |
66
+ | Baseline | 64x512 (65k) | 0.94 | 15k |
67
+ | w. both | 64x512 (65k) | 1.45 | 23k |
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/commands.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 命令
2
+
3
+ ## 推理
4
+
5
+ 您可以修改相应的配置文件来更改推理设置。在 [此处](/docs/structure.md#inference-config-demos) 查看更多详细信息。
6
+
7
+ ### 在 ImageNet 上使用 DiT 预训练进行推理
8
+
9
+ 以下命令会自动在 ImageNet 上下载预训练权重并运行推理。
10
+
11
+ ```bash
12
+ python scripts/inference.py configs/dit/inference/1x256x256-class.py --ckpt-path DiT-XL-2-256x256.pt
13
+ ```
14
+
15
+ ### 在 UCF101 上使用 Latte 预训练进行推理
16
+
17
+ 以下命令会自动下载 UCF101 上的预训练权重并运行推理。
18
+
19
+ ```bash
20
+ python scripts/inference.py configs/latte/inference/16x256x256-class.py --ckpt-path Latte-XL-2-256x256-ucf101.pt
21
+ ```
22
+
23
+ ### 使用 PixArt-α 预训练权重进行推理
24
+
25
+ 将 T5 下载到 `./pretrained_models` 并运行以下命令。
26
+
27
+ ```bash
28
+ # 256x256
29
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x256x256.py --ckpt-path PixArt-XL-2-256x256.pth
30
+
31
+ # 512x512
32
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x512x512.py --ckpt-path PixArt-XL-2-512x512.pth
33
+
34
+ # 1024 multi-scale
35
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x1024MS.py --ckpt-path PixArt-XL-2-1024MS.pth
36
+ ```
37
+
38
+ ### 使用训练期间保存的 checkpoints 进行推理
39
+
40
+ 在训练期间,会在 `outputs` 目录中创建一个实验日志记录文件夹。在每个 checkpoint 文件夹下(例如 `epoch12-global_step2000`),有一个 `ema.pt` 文件和共享的 `model` 文件夹。执行以下命令进行推理。
41
+
42
+ ```bash
43
+ # 使用 ema 模型进行推理
44
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000/ema.pt
45
+
46
+ # 使用模型进行推理
47
+ torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000
48
+
49
+ # 使用序列并行进行推理
50
+ # 当 nproc_per_node 大于 1 时,将自动启用序列并行
51
+ torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000
52
+ ```
53
+
54
+ 第二个命令将在 checkpoint 文件夹中自动生成一个 `model_ckpt.pt` 文件。
55
+
56
+ ### 推理超参数
57
+
58
+ 1. DPM 求解器擅长对图像进行快速推理,但它在视频推理上的效果并不令人满意。若仅出于快速演示目的,您可以使用这个求解器。
59
+
60
+ ```python
61
+ type="dmp-solver"
62
+ num_sampling_steps=20
63
+ ```
64
+
65
+ 2. 您可以在视频推理上使用 [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt) 微调的 VAE 解码器(消耗更多内存)。但是,我们没有看到视频推理效果有明显改善。要使用它,请将 [预训练权重](https://huggingface.co/maxin-cn/Latte/tree/main/t2v_required_models/vae_temporal_decoder) 下载到 `./pretrained_models/vae_temporal_decoder` 中,并修改配置文件,如下所示。
66
+
67
+ ```python
68
+ vae = dict(
69
+ type="VideoAutoencoderKLTemporalDecoder",
70
+ from_pretrained="pretrained_models/vae_temporal_decoder",
71
+ )
72
+ ```
73
+
74
+ ## 训练
75
+
76
+ 如果您要继续训练,请运行以下命令。参数 ``--load`` 与 ``--ckpt-path`` 的不同之处在于,前者还会加载优化器和数据加载器的状态。
77
+
78
+ ```bash
79
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --load YOUR_PRETRAINED_CKPT
80
+ ```
81
+
82
+ 如果要启用 wandb 日志,请在命令中添加 `--wandb` 参数。
83
+
84
+ ```bash
85
+ WANDB_API_KEY=YOUR_WANDB_API_KEY torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --wandb True
86
+ ```
87
+
88
+ 您可以修改相应的配置文件来更改训练设置。在 [此处](/docs/structure.md#training-config-demos) 查看更多详细信息。
89
+
90
+ ### 训练超参数
91
+
92
+ 1. `dtype` 是用于训练的数据类型。仅支持 `fp16` 和 `bf16`。ColossalAI 自动启用 `fp16` 和 `bf16` 的混合精度训练。在训练过程中,我们发现 `bf16` 更稳定。
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/datasets.md ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 数据集
2
+
3
+ ## 正在使用的数据集
4
+
5
+ ### HD-VG-130M
6
+
7
+ [HD-VG-130M](https://github.com/daooshee/HD-VG-130M?tab=readme-ov-file) 包括 130M 个文本视频对。字幕由
8
+ BLIP-2 生成。我们发现其剪辑切分和文本质量都相对较差。该数据集包含 20 个拆分,对于 OpenSora 1.0,我们使用第一个拆分。我们计划使用整个数据集并对其进行重新处理。
9
+
10
+ ### Inter4k
11
+
12
+ [Inter4k](https://github.com/alexandrosstergiou/Inter4K) 是一个包含分辨率为 4K 的 1k 视频剪辑的数据集。这个
13
+ 数据集被提议用于超分辨率任务。我们使用数据集进行 HQ 训练。处理过的视频可以从这里找到 [这里](README.md#数据处理) 。
14
+
15
+ ### Pexels.com
16
+
17
+ [Pexels.com](https://www.pexels.com/) 是一个提供免费库存照片和视频的网站。我们从该网站收集了 19K 个视频
17
+ 剪辑,用于高质量训练。处理过的视频可以在[这里](README.md#数据处理)找到。
19
+
20
+ ## 数据集监视列表
21
+
22
+ 我们也在关注以下数据集,并考虑在未来使用它们,这取决于我们的存储空间以及数据集的质量。
23
+
24
+ | 名称 | 大小 | 描述 |
25
+ |-------------------|--------------|-------------------------------|
26
+ | Panda-70M | 70M videos | High quality video-text pairs |
27
+ | WebVid-10M | 10M videos | Low quality |
28
+ | InternVid-10M-FLT | 10M videos | |
29
+ | EGO4D | 3670 hours | |
30
+ | OpenDV-YouTube | 1700 hours | |
31
+ | VidProM | 6.69M videos | |
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v1.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora v1 技术报告
2
+
3
+ OpenAI的Sora在生成一分钟高质量视频方面非常出色。然而,它几乎没有透露任何关于其细节的信息。为了使人工智能更加“开放”,我们致力于构建一个开源版本的Sora。这份报告描述了我们第一次尝试训练一个基于Transformer的视频扩散模型。
4
+
5
+ ## 选择高效的架构
6
+
7
+ 为了降低计算成本,我们希望利用现有的VAE模型。Sora使用时空VAE来减少时间维度。然而,我们发现没有开源的高质量时空VAE模型。[MAGVIT](https://github.com/google-research/magvit)的4x4x4 VAE并未开源,而[VideoGPT](https://wilson1yan.github.io/videogpt/index.html)的2x4x4 VAE在我们的实验中质量较低。因此,我们决定在我们第一个版本中使用2D VAE(来自[Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original))。
8
+
9
+ 视频训练涉及大量的token。考虑到24fps的1分钟视频,我们有1440帧。通过VAE下采样4倍和patch大小下采样2倍,我们得到了1440x1024≈150万个token。在150万个token上进行全注意力计算将带来巨大的计算成本。因此,我们使用时空注意力来降低成本,这是遵循[Latte](https://github.com/Vchitect/Latte)的方法。
10
+
11
+ 如图中所示,在STDiT(ST代表时空)中,我们在每个空间注意力之后立即插入一个时间注意力。这类似于Latte论文中的变种3,不过我们并没有刻意让这些变体保持相近的参数量。虽然Latte的论文声称他们的变体比变种3更好,但我们在16x256x256视频上的实验表明,在相同迭代次数下,性能排名为:DiT(完整)> STDiT(顺序)> STDiT(并行)≈ Latte。因此,我们出于效率考虑选择了STDiT(顺序)。[这里](/docs/acceleration.md#efficient-stdit)提供了速度基准测试。
12
+
13
+
14
+ ![Architecture Comparison](/assets/readme/report_arch_comp.png)
15
+
16
+ 为了专注于视频生成,我们希望基于一个强大的图像生成模型来训练我们的模型。PixArt-α是一个经过高效训练的高质量图像生成模型,具有T5条件化的DiT结构。我们使用[PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha)初始化我们的模型,并将插入的时间注意力的投影层初始化为零。这种初始化在开始时保留了模型的图像生成能力,而Latte的架构则不能。插入的注意力将参数数量从5.8亿增加到7.24亿。
17
+
18
+ ![Architecture](/assets/readme/report_arch.jpg)
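+
+ 上文提到,我们在每个空间注意力之后插入时间注意力,并把新增时间注意力的投影层初始化为零。下面是一段极简的 PyTorch 示意代码(模块结构、维度和超参数均为假设,并非仓库中的实际实现):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ToySTDiTBlock(nn.Module):
+     """空间注意力 -> 时间注意力(输出投影零初始化)-> MLP 的最小示意。"""
+
+     def __init__(self, dim: int = 64, num_heads: int = 4):
+         super().__init__()
+         self.spatial_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
+         self.temporal_attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
+         # 将时间注意力的输出投影初始化为零:初始时该分支不产生任何输出
+         nn.init.zeros_(self.temporal_attn.out_proj.weight)
+         nn.init.zeros_(self.temporal_attn.out_proj.bias)
+         self.mlp = nn.Sequential(nn.Linear(dim, dim * 4), nn.GELU(), nn.Linear(dim * 4, dim))
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         # x: [B, T, S, C],T 为帧数,S 为每帧的空间 token 数
+         b, t, s, c = x.shape
+         xs = x.reshape(b * t, s, c)                  # 空间注意力:在每帧内部计算
+         xs = xs + self.spatial_attn(xs, xs, xs)[0]
+         xt = xs.reshape(b, t, s, c).permute(0, 2, 1, 3).reshape(b * s, t, c)
+         xt = xt + self.temporal_attn(xt, xt, xt)[0]  # 时间注意力:在同一空间位置的帧序列上计算
+         x = xt.reshape(b, s, t, c).permute(0, 2, 1, 3)
+         return x + self.mlp(x)
+
+ # 用法示意:y = ToySTDiTBlock()(torch.randn(2, 16, 64, 64))
+ ```
+
+ 由于时间注意力的输出投影初始为零,该分支在训练开始时等价于恒等映射,因此模型一开始就保留了 PixArt-α 的图像生成能力。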
19
+
20
+ 借鉴PixArt-α和Stable Video Diffusion的成功,我们还采用了渐进式训练策略:在366K预训练数据集上进行16x256x256的训练,然后在20K数据集上进行16x256x256、16x512x512和64x512x512的训练。通过扩展位置嵌入,这一策略极大地降低了计算成本。
21
+
22
+ 我们还尝试在DiT中使用3D patch嵌入器。然而,在时间维度上2倍下采样后,生成的视频质量较低。因此,我们将在下一版本中将下采样留给时间VAE。目前,我们在每3帧采样一次进行16帧训练,以及在每2帧采样一次进行64帧训练。
23
+
24
+
25
+ ## 数据是训练高质量模型的核心
26
+
27
+ 我们发现数据的数量和质量对生成视频的质量有很大的影响,甚至比模型架构和训练策略的影响还要大。目前,我们只从[HD-VG-130M](https://github.com/daooshee/HD-VG-130M)准备了第一批分割(366K个视频片段)。这些视频的质量参差不齐,而且字幕也不够准确。因此,我们进一步从提供免费许可视频的[Pexels](https://www.pexels.com/)收集了20k相对高质量的视频。我们使用LLaVA,一个图像字幕模型,通过三个帧和一个设计好的提示来标记视频。有了设计好的提示,LLaVA能够生成高质量的字幕。
28
+
29
+ ![Caption](/assets/readme/report_caption.png)
30
+
31
+ 由于我们更加注重数据质量,我们准备收集更多数据,并在下一版本中构建一个视频预处理流程。
32
+
33
+ ## 训练细节
34
+
35
+ 在有限的训练预算下,我们只进行了一些探索。我们发现学习率1e-4过大,因此将其降低到2e-5。在进行大批量训练时,我们发现`fp16`比`bf16`不太稳定,可能会导致生成失败。因此,我们在64x512x512的训练中切换到`bf16`。对于其他超参数,我们遵循了之前的研究工作。
36
+
37
+ ## 损失曲线
38
+
39
+ 16x256x256 预训练损失曲线
40
+
41
+ ![16x256x256 Pretraining Loss Curve](/assets/readme/report_loss_curve_1.png)
42
+
43
+ 16x256x256 高质量训练损失曲线
44
+
45
+ ![16x256x256 HQ Training Loss Curve](/assets/readme/report_loss_curve_2.png)
46
+
47
+ 16x512x512 高质量训练损失曲线
48
+
49
+ ![16x512x512 HQ Training Loss Curve](/assets/readme/report_loss_curve_3.png)
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v2.md ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora 1.1 技术报告
2
+
3
+ - [模型架构修改](#模型架构修改)
4
+ - [支持不同视频长度/分辨率/宽高比/帧率(fps)训练](#支持不同视频长度分辨率宽高比帧率fps训练)
5
+ - [使用Masked DiT作为图生视频/视频生视频模型](#使用masked-dit作为图生视频视频生视频模型)
6
+ - [数据收集和流程](#数据收集和流程)
7
+ - [训练详情](#训练详情)
8
+ - [结果和评价](#结果和评价)
9
+ - [不足和下一步计划](#不足和下一步计划)
10
+
11
+ 在Open-Sora1.1版本中,我们使用了10M数据来训练经过结构调优后的STDiT的700M模型(Open-Sora1.0版本仅用400K数据)。我们实现了[Sora报告](https://openai.com/research/video-generation-models-as-world-simulators)中提到的以下功能:
12
+
13
+ - 可变的视频时长、分辨率、宽高比(包括采样灵活性、改进的取景范围和构图)
14
+ - 提示词增加图片和视频选项(使图像动起来、生成式增长视频、视频到视频编辑、连接不同视频)
15
+ - 图像生成功能
16
+
17
+ 为了实现这一目标,我们在预训练阶段使用了多任务学习。对于扩散模型来说,用不同的采样时间步长进行训练本身已经是一种多任务学习。我们将这一思想进一步扩展到多分辨率、多宽高比、不同帧长、不同fps以及不同掩码策略下的图像和视频条件生成。我们在**0~15s、144p到720p、各种宽高比的视频**上训练模型。虽然受训练FLOPs不足的限制,生成视频的时间一致性还不够理想,但我们仍然可以看到这个模型的巨大潜力。
18
+
19
+ ## 模型架构修改
20
+
21
+ 我们对原始ST-DiT模型进行了以下修改,以获得更好的训练稳定性和模型性能(ST-DiT-2):
22
+
23
+ - **在时间注意力模块中添加[旋转位置编码](https://arxiv.org/abs/2104.09864)**:遵循目前LLM的最佳实践,我们将时间注意力模块中的正弦位置编码更改为旋转位置编码,因为它也算一项序列预测任务。
24
+ - **在时间注意力模块中添加 AdaIN 和 LayerNorm**:我们像处理空间注意力一样,用 AdaIN 和 LayerNorm 包裹时间注意力,以稳定训练。
25
+ - **[QK归一化](https://arxiv.org/abs/2302.05442)与[RMSNorm](https://arxiv.org/abs/1910.07467)**:与[SD3](https://arxiv.org/pdf/2403.03206.pdf)类似,我们应用 QK 归一化来提高半精度训练的稳定性(见本列表下方的示意代码)。
26
+ - **支持动态输入大小和视频条件限定**:为了支持多分辨率、宽高比和 fps 训练,我们修改了 ST-DiT-2,使其可以接受任意输入大小。延伸 [PixArt-alpha](https://github.com/PixArt-alpha/PixArt-alpha) 的想法,我们支持以视频的高度、宽度、宽高比、帧长和 fps 作为条件。
27
+ - **将T5token数量从120扩展到200**:我们使用的视频描述通常少于200个token,我们发现模型也可以很好地处理更长的文本。
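+
+ 上面第三条提到的 QK 归一化可以用下面几行 PyTorch 代码示意(仅为说明思路,RMSNorm 的具体实现与插入位置为假设,并非仓库中的确切实现):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class RMSNorm(nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-6):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         # 按最后一维做 RMS 归一化
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) * self.weight
+
+ def qk_norm_attention(q, k, v, q_norm: RMSNorm, k_norm: RMSNorm):
+     # 在计算点积注意力之前分别归一化 q 和 k,缓解半精度训练中的数值溢出
+     return F.scaled_dot_product_attention(q_norm(q), k_norm(k), v)
+ ```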
28
+
29
+ ## 支持不同视频长度/分辨率/宽高比/帧率(fps)训练
30
+
31
+ 正如[Sora报告](https://openai.com/research/video-generation-models-as-world-simulators)中提到的,使用原始无损视频的分辨率、宽高比和视频长度进行训练可以增加采样灵活性,改善取景和构图。我们找到了三种实现这一目标的方法:
32
+ - [NaViT](https://arxiv.org/abs/2307.06304):通过不同掩码策略支持在同一训练批次内使用不同大小的数据,并且训练效率下降很少。然而,该方法实现起来有点复杂,并且可能无法兼容 kernel 优化技术(如 flash attention)。
33
+ - 填充([FiT](https://arxiv.org/abs/2402.12376),[Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan)):通过填充支持同一批次内的不同大小的数据。然而,将不同的分辨率填充到相同的大小会导致效率降低。
34
+ - 分桶训练([SDXL](https://arxiv.org/abs/2307.01952)、[PixArt](https://arxiv.org/abs/2310.00426)):支持通过分桶的方式在不同批次中动态调整大小,但在同一批次内数据大小必须相同,只能应用固定数量的数据大小。在一个批次中,我们不需要实现复杂的掩码或填充。
35
+
36
+ 为了更便捷的实现,我们选择分桶训练的方式。我们预先定义了一些固定的分辨率,并将不同的样本分配到不同的桶中。下面列出了分桶方案中值得注意的点。但我们可以看到,这些在我们的实验中并不是一个大问题。
37
+
38
+ <details>
39
+ <summary>查看注意事项</summary>
40
+
41
+ - 桶大小被限制为固定数量:首先,在实际应用中,通常只使用少数宽高比(9:16、3:4)和分辨率(240p、1080p)。其次,我们发现经过训练的模型可以很好地推广到未见过的分辨率。
42
+ - 每批的大小相同,打破了独立同分布(i.i.d.)假设:由于我们使用多个 GPU,因此不同 GPU 上的本地批次具有不同的大小。我们没有发现此问题导致性能显着下降。
43
+ - 可能没有足够的样本来填充每个桶,并且分布可能有偏差:首先,当本地批量大小不太大时,我们的数据集足够大以填充每个桶。其次,我们应该分析数据大小的分布并相应地定义桶大小。第三,分配不平衡并没有显著影响训练过程。
44
+ - 不同的分辨率和帧长可能有不同的处理速度:与PixArt只处理相似分辨率(相似token数)的宽高比不同,我们需要考虑不同分辨率和帧长的处理速度。我们可以使用“bucket_config”来定义每个桶的批量大小,以确保处理速度相似。
45
+
46
+ </details>
47
+
48
+ ![bucket](/assets/readme/report_bucket.png)
49
+
50
+ 如图所示,桶是(分辨率,帧数量,宽高比)的三元组。我们为不同的分辨率提供预定义的宽高比,涵盖了大多数常见的视频宽高比。在每个epoch之前,我们打乱数据集并将样本分配到不同的桶中,如图所示。我们会把每个样本放入分辨率和帧长都不超过该视频的最大的桶中。
51
+
52
+ 考虑到我们的计算资源有限,我们进一步为每个(分辨率,num_frame)二元组引入keep_prob和batch_size两个属性,以降低计算成本并实现多阶段训练。具体来说,高清视频将以概率1-keep_prob下采样到较低分辨率的桶中,并且每个桶的样本数量是由batch_size属性决定的。这样,我们可以控制不同桶中的样本数量,并通过为每个桶搜索合适的数据量来平衡GPU负载。
53
+
54
+ 有关训练中桶使用的详细说明,请参阅[配置文件](/docs/config.md#training-bucket-configs).
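+
+ 举例来说,一个桶配置可以写成如下形式(数值仅为示例,用来说明 `{分辨率: {帧数: (keep_prob, batch_size)}}` 的结构,实际取值需要按显存和数据分布调整):
+
+ ```python
+ # 分辨率 -> {帧数: (进入该桶的概率 keep_prob, 每个 GPU 上的 batch_size)}
+ bucket_config = {
+     "240p": {16: (1.0, 16), 64: (0.5, 4)},
+     "480p": {16: (0.5, 8), 64: (0.3, 2)},
+     "720p": {16: (0.2, 4), 64: (0.1, 1)},
+ }
+ ```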
55
+
56
+ ## 使用Masked DiT作为图生视频/视频生视频模型
57
+
58
+ Transformer可以很容易地扩展到支持图生图和视频生视频的任务。我们提出了一种蒙版策略来支持图像和视频的调节。蒙版策略如下图所示。
59
+
60
+ ![mask strategy](/assets/readme/report_mask.png)
61
+
62
+ 在将图像或视频转换成另一个视频的过程中,我们通常会选择出需要作为条件的帧并取消其掩码(unmask)。在使用ST-DiT模型进行前向传播时,被选择取消掩码(unmask)的帧将被赋予时间步长0,而其他帧则保持它们原有的时间步长t。我们发现,如果直接将这种策略应用到训练好的模型上,会得到较差的结果,因为扩散模型在训练过程中并未学会如何处理一个样本中具有不同时间步长的帧。
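+
+ 这一核心思想可以用下面几行代码示意(仅为说明,非实际实现;这里假设 `x_mask` 是按帧的布尔张量,True 表示该帧作为条件帧、不加噪):
+
+ ```python
+ import torch
+
+ def per_frame_timesteps(t: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
+     """t: [B] 当前扩散时间步;x_mask: [B, T] 条件帧掩码。
+     返回 [B, T] 的逐帧时间步:条件帧取 0,其余帧保持 t。"""
+     t_frame = t[:, None].expand(-1, x_mask.shape[1]).clone()
+     t_frame[x_mask] = 0
+     return t_frame
+ ```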
63
+
64
+ 受[UL2](https://arxiv.org/abs/2205.05131)的启发,我们在训练期间引入了随机掩码策略。具体来说,我们在训练期间随机选择要取消掩码的帧,包括取消掩码第一帧、前k帧、最后一帧、最后k帧、第一和最后k帧、随机帧等。基于Open-Sora 1.0模型,以50%的概率应用掩码策略,我们发现模型能够在10,000步的训练中学会处理图像条件(而30%的概率会导致处理能力变差),同时文本到视频的性能略有下降。因此,在Open-Sora 1.1版本中,我们从头开始预训练模型,并采用了掩码策略。
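+
+ 训练时随机选择取消掩码的帧,可以用下面的最小示意来说明(策略集合与默认 k 值均为假设,并非实际实现):
+
+ ```python
+ import random
+ import torch
+
+ def random_unmask(num_frames: int, k: int = 1) -> torch.Tensor:
+     """返回长度为 num_frames 的布尔掩码,True 表示该帧作为条件(取消掩码)。"""
+     mask = torch.zeros(num_frames, dtype=torch.bool)
+     strategy = random.choice(["first_k", "last_k", "first_and_last_k", "random"])
+     if strategy == "first_k":
+         mask[:k] = True
+     elif strategy == "last_k":
+         mask[-k:] = True
+     elif strategy == "first_and_last_k":
+         mask[:k] = True
+         mask[-k:] = True
+     else:  # 随机取消掩码 k 帧
+         mask[torch.randperm(num_frames)[:k]] = True
+     return mask
+ ```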
65
+
66
+ 下图给出了用于推理的掩码策略配置的说明。五数字元组在定义掩码策略方面提供了极大的灵活性。
67
+
68
+ ![mask strategy config](/assets/readme/report_mask_config.png)
69
+
70
+ 掩码策略用法的详细说明可在[配置文件](/docs/config.md#advanced-inference-config)中查看.
71
+
72
+
73
+ ## 数据收集和流程
74
+
75
+ 正如我们在Sora1.0版本中看见的那样,数据数量和质量对于训练一个好的模型至关重要,因此,我们努力扩展数据集。首先,我们创建了一个遵循[SVD](https://arxiv.org/abs/2311.15127)的自动流水线,包括场景切割、字幕、各种评分和过滤以及数据集管理脚本和通用惯例。
76
+
77
+ ![pipeline](/assets/readme/report_data_pipeline.png)
78
+
79
+ 我们计划使用[panda-70M](https://snap-research.github.io/Panda-70M/)和其他数据来训练模型,大约包含3000万条数据。然而,我们发现磁盘输入输出(disk IO)在同时进行训练和数据处理时成为了一个瓶颈。因此,我们只能准备一个包含1000万条数据的数据集,并且没有完成我们构建的所有处理流程。最终,我们使用了包含970万视频和260万图像的数据集进行预训练,以及56万视频和160万图像的数据集进行微调。预训练数据集的统计信息如下所示。
80
+
81
+ 图像文本标记 (使用T5分词器):
82
+ ![image text tokens](/assets/readme/report_image_textlen.png)
83
+
84
+ 视频文本标记 (使用T5分词器)。我们直接使用Panda的短视频描述进行训练,并自己给其他数据集加视频描述。生成的字幕通常少于200个token。
85
+ ![video text tokens](/assets/readme/report_video_textlen.png)
86
+
87
+ 视频时长:
88
+ ![video duration](/assets/readme/report_video_duration.png)
89
+
90
+ ## 训练详情
91
+
92
+ 由于计算资源有限、无法进行消融研究,我们必须仔细监控训练过程,并在推测模型学习不佳时及时调整训练策略。因此,Open-Sora 1.1 版本的训练包含多次调整,期间没有使用指数移动平均(EMA)。
93
+
94
+ 1. 首先,我们从`Pixart-alpha-1024`的模型checkpoint开始,使用不同分辨率的图像进行了6000步的微调。我们发现模型能够很容易地适应并生成不同分辨率的图像。为了加快��散过程的训练,我们使用了[SpeeDiT](https://github.com/1zeryu/SpeeDiT)(iddpm-speed)技术。
95
+ 2. **[阶段一]** 然后,我们使用梯度检查点(gradient-checkpointing)技术对模型进行了**24,000**步的预训练,这个过程在64个H800 GPU上运行了**4天**。尽管模型看到的数据样本数量相同,我们发现与使用较小批量大小相比,模型的学习速度较慢。我们推测,在训练的早期阶段,步数的数量对于训练更为重要。大多数视频的分辨率是**240p**,预训练时使用的配置与[stage2.py](/configs/opensora-v1-1/train/stage2.py)相似。
96
+ 3. **[阶段一]** 为了增加训练步数,我们改用了更小的批量大小,并且没有使用梯度检查点技术。在这个阶段,我们还引入了帧率(fps)条件。模型训练了**40,000**步,持续了**2天**。训练中使用的视频大多数是**144p**分辨率,使用的配置文件是[stage1.py](/configs/opensora-v1-1/train/stage1.py)。我们使用较低的分辨率,因为我们在Open-Sora 1.0版本中发现模型可以以相对较低的分辨率学习时间知识。
97
+ 4. **[阶段一]** 我们发现模型不能很好地学习长视频,并且像 Open-Sora 1.0 的训练一样出现了带噪的生成结果,推测是半精度问题。因此,我们采用QK-归一化来稳定训练。我们还将iddpm-speed切换成iddpm。我们训练了**17k**步,用时**14小时**。大多数视频的分辨率是144p,预训练时使用的配置是[stage1.py](/configs/opensora-v1-1/train/stage1.py)。阶段1训练持续约一周,总步数为**81k**。
98
+ 5. **[阶段二]** 我们切换到更高的分辨率,其中大多数视频是**240p和480p**分辨率([stage2.py](/configs/opensora-v1-1/train/stage2.py))。我们在所有预训练数据上训练了**22000**步,持续**一天**。
99
+ 6. **[阶段三]** 我们切换到更高的分辨率,大多数视频的分辨率是**480p和720p**([stage3.py](/configs/opensora-v1-1/train/stage3.py))。我们在高质量数据上训了**4000**步,用时**一天**。
100
+
101
+ ## 结果和评价
102
+
103
+ ## 不足和下一步计划
104
+
105
+ 随着我们离Sora的复现又近了一步,我们发现当前模型存在许多不足,这些不足将在我们下阶段工作中得到改善。
106
+
107
+ - **噪音的生成和影响**:我们发现生成的模型,特别是长视频中,有时很多噪点,不流畅。我们认为问题在于没有使用时间VAE。由于[Pixart-Sigma](https://arxiv.org/abs/2403.04692)发现适应新VAE很容易,我们计划在下一个版本中为模型开发时间VAE。
108
+ - **缺乏时间一致性**:我们发现模型无法生成具有高时间一致性的视频,我们认为问题是由于缺乏训练FLOPs,我们计划收集更多数据并继续训练模型以提高时间一致性。
109
+ - **人像生成质量低**:我们发现模型无法生成高质量的人类视频,我们认为问题是由于缺乏人类数据,我们计划收集更多的人类数据,并继续训练模型以提高人类生成。
110
+ - **美学得分低**:我们发现模型的美学得分不高。问题在于缺少美学得分过滤,由于IO瓶颈没我们没有进行这一步骤。我们计划通过美学得分和微调模型来过滤数据,以提高美学得分。
111
+ - **长视频生成质量低**:我们发现,使用同样的提示词,视频越长,质量越差。这意味着图像质量不能同等地被不同长度的序列所适应。
112
+
113
+ > - **算法与加速实现**:Zangwei Zheng, Xiangyu Peng, Shenggui Li, Hongxing Liu, Yukun Zhou
114
+ > - **数据收集与处理**:Xiangyu Peng, Zangwei Zheng, Chenhui Shen, Tom Young, Junjie Wang, Chenfeng Yu
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/report_v3.md ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Open-Sora 1.2 报告
2
+
3
+ - [视频压缩网络](#视频压缩网络)
4
+ - [整流流和模型适应](#整流流和模型适应)
5
+ - [更多数据和更好的多阶段训练](#更多数据和更好的多阶段训练)
6
+ - [简单有效的模型调节](#简单有效的模型调节)
7
+ - [评估](#评估)
+ - [序列并行](#序列并行)
8
+
9
+ 在 Open-Sora 1.2 版本中,我们在 >30M 数据上训练了一个 1.1B 的模型,支持 0s~16s、144p 到 720p、各种宽高比的视频生成。我们的配置如下所列。继 1.1 版本之后,Open-Sora 1.2 还可以进行图像到视频的生成和视频扩展。
10
+
11
+ | | 图像 | 2秒 | 4秒 | 8秒 | 16秒 |
12
+ | ---- | ----- | --- | --- | --- | --- |
13
+ | 240p | ✅ | ✅ | ✅ | ✅ | ✅ |
14
+ | 360p | ✅ | ✅ | ✅ | ✅ | ✅ |
15
+ | 480p | ✅ | ✅ | ✅ | ✅ | 🆗 |
16
+ | 720p | ✅ | ✅ | ✅ | 🆗 | 🆗 |
17
+
18
+ 这里✅表示在训练期间可以看到数据,🆗表示虽然没有经过训练,但模型可以在该配置下进行推理。🆗的推理需要多个80G内存的GPU和序列并行。
19
+
20
+ 除了 Open-Sora 1.1 中引入的功能外,Open-Sora 1.2 还有以下重磅更新:
21
+
22
+ - 视频压缩网络
23
+ - 整流流训练
24
+ - 更多数据和更好的多阶段训练
25
+ - 简单有效的模型调节
26
+ - 更好的评估指标
27
+
28
+ 上述改进的所有实现(包括训练和推理)均可在 Open-Sora 1.2 版本中使用。以下部分将介绍改进的细节。我们还改进了代码库和文档,使其更易于使用。
29
+
30
+ ## 视频压缩网络
31
+
32
+ 对于 Open-Sora 1.0 & 1.1,我们使用了 stable-ai 的 83M 2D VAE,它仅在空间维度上压缩,将视频压缩 8x8 倍。为了减少时间维度,我们每三帧提取一帧。然而,这种方法导致生成的视频流畅度较低,因为牺牲了生成的帧率(fps)。因此,在这个版本中,我们引入了像 OpenAI 的 Sora 一样的视频压缩网络。该网络在时域上将视频大小压缩至四分之一,因此,我们不必再额外抽帧,而可以使用原有帧率生成模型。
33
+
34
+ 考虑到训练 3D VAE 的计算成本很高,我们希望重新利用在 2D VAE 中学到的知识。我们注意到,经过 2D VAE 压缩后,时间维度上相邻的特征仍然高度相关。因此,我们提出了一个简单的视频压缩网络,首先将视频在空间维度上压缩 8x8 倍,然后将视频在时间维度上压缩 4 倍。网络如下所示:
35
+
36
+ ![video_compression_network](/assets/readme/report_3d_vae.png)
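+
+ 这种“先空间、后时间”的堆叠可以用下面的示意代码说明(只描述数据流,`spatial_vae` / `temporal_vae` 的接口均为假设,并非实际实现):
+
+ ```python
+ import torch
+
+ def stacked_encode(spatial_vae, temporal_vae, video: torch.Tensor) -> torch.Tensor:
+     """video: [B, C, T, H, W];先逐帧做 8x8 空间压缩,再做 4x 时间压缩。"""
+     b, c, t, h, w = video.shape
+     frames = video.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w)
+     z2d = spatial_vae.encode(frames)                               # [B*T, C', H/8, W/8]
+     z2d = z2d.reshape(b, t, *z2d.shape[1:]).permute(0, 2, 1, 3, 4)
+     return temporal_vae.encode(z2d)                                # 时间维度再压缩 4 倍
+ ```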
37
+
38
+ 我们用[SDXL 的 VAE](https://huggingface.co/stabilityai/sdxl-vae)初始化 2D VAE,它比我们以前使用的更好。对于 3D VAE,我们采用[Magvit-v2](https://magvit.cs.cmu.edu/v2/)中的 VAE 结构,它包含 300M 个参数。加上 83M 的 2D VAE,视频压缩网络的总参数为 384M。我们设定 batch size 为 1,对 3D VAE 进行了 1.2M 步的训练。训练数据是来自 Pexels 和 Pixabay 的视频,训练视频大小主要是 17 帧、256x256 分辨率。3D VAE 中使用 causal convolutions,使图像重建更加准确。
39
+
40
+ 我们的训练包括三个阶段:
41
+
42
+ 1. 对于前 380k 步,我们冻结 2D VAE并在 8 个 GPU 上进行训练。训练目标包括重建 2D VAE 的压缩特征(图中粉红色),并添加损失以使 3D VAE 的特征与 2D VAE 的特征相似(粉红色和绿色,称为identity loss)。我们发现后者的损失可以快速使整个 VAE 在图像上取得良好的性能,并在下一阶段更快地收敛。
43
+ 2. 对于接下来的 260k 步,我们消除identity loss并仅学习 3D VAE。
44
+ 3. 对于最后 540k 步,由于我们发现仅重建 2D VAE 的特征无法带来进一步的改进,因此我们移除了该重建损失,并训练整个 VAE 来重建原始视频。此阶段在 24 个 GPU 上进行训练。
45
+
46
+ 对于训练的前半部分,我们采用 20% 的图像和 80% 的视频。按照[Magvit-v2](https://magvit.cs.cmu.edu/v2/),我们使用 17 帧训练视频,同时对图像的前 16 帧进行零填充。然而,我们发现这种设置会导致长度不同于 17 帧的视频变得模糊。因此,在第 3 阶段,我们使用不超过34帧长度的任意帧长度视频进行混合视频长度训练,以使我们的 VAE 对不同视频长度更具鲁棒性(也就是说,如果我们希望训练含有n帧的视频,我们就把原视频中`34-n`帧用0进行填充)。我们的 [训练](/scripts/train_vae.py)和[推理](/scripts/inference_vae.py)代码可在 Open-Sora 1.2 版本中找到。
47
+
48
+ 当使用 VAE 进行扩散模型时,我们的堆叠 VAE 所需的内存较少,因为我们的 VAE 的输入已经经过压缩。我们还将输入视频拆分为几个 17 帧剪辑,以提高推理效率。我们的 VAE 与[Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan/blob/main/docs/Report-v1.1.0.md)中的另一个开源 3D VAE 性能相当。
49
+
50
+ | 模型 | 结构相似性↑ | 峰值信噪比↑ |
51
+ | ------------------ | ----- | ------ |
52
+ | Open-Sora-Plan 1.1 | 0.882 | 29.890 |
53
+ | Open-Sora 1.2 | 0.880 | 30.590 |
54
+
55
+ ## 整流流和模型适应
56
+
57
+ 最新的扩散模型 Stable Diffusion 3 为了获得更好的性能,采用了[rectified flow](https://github.com/gnobitab/RectifiedFlow)替代了 DDPM。可惜 SD3 的 rectified flow 训练代码没有开源。不过 Open-Sora 1.2 提供了遵循 SD3 论文的训练代码,包括:
58
+
59
+ - 基本整流流训练
60
+ - 用于训练加速的 Logit-norm 采样
61
+ - 分辨率和视频长度感知时间步长采样
62
+
63
+ 对于分辨率感知的时间步长采样,我们应该对分辨率较大的图像使用更多的噪声。我们将这个想法扩展到视频生成,对长度较长的视频使用更多的噪声。
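+
+ 下面给出一个最小示意(遵循 SD3 论文中 logit-normal 采样加时间步偏移的思路;偏移公式和参数均为假设,并非仓库中的确切实现):
+
+ ```python
+ import torch
+
+ def sample_timesteps(batch_size: int, num_tokens: int, base_tokens: int = 4096,
+                      mean: float = 0.0, std: float = 1.0) -> torch.Tensor:
+     """logit-normal 采样 t ∈ (0, 1),并按序列长度(分辨率 x 帧数)向高噪声端偏移。"""
+     # logit-normal:先采正态分布,再经过 sigmoid
+     t = torch.sigmoid(torch.randn(batch_size) * std + mean)
+     # 分辨率/时长感知:token 越多,偏移越大,即加更多噪声
+     shift = (num_tokens / base_tokens) ** 0.5
+     return shift * t / (1 + (shift - 1) * t)
+ ```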
64
+
65
+ Open-Sora 1.2 从[PixArt-Σ 2K](https://github.com/PixArt-alpha/PixArt-sigma) 模型checkpoint开始。请注意,此模型使用 DDPM 和 SDXL VAE 进行训练,分辨率也高得多。我们发现在小数据集上进行微调可以轻松地使模型适应我们的视频生成设置。适应过程如下,所有训练都在 8 个 GPU 上完成:
66
+
67
+ 1. 多分辨率图像生成能力:我们训练模型以 20k 步生成从 144p 到 2K 的不同分辨率。
68
+ 2. QK-norm:我们将 QK-norm 添加到模型中并训练 18k 步。
69
+ 3. 整流流:我们从离散时间 DDPM 转变为连续时间整流流并训练 10k 步。
70
+ 4. 使用 logit-norm 采样和分辨率感知时间步采样的整流流:我们训练 33k 步。
71
+ 5. 较小的 AdamW epsilon:按照 SD3,使用 QK-norm,我们可以对 AdamW 使用较小的 epsilon(1e-15),我们训练 8k 步。
72
+ 6. 新的 VAE 和 fps 调节:我们用自己的 VAE 替换原来的 VAE,并将 fps 调节添加到时间步调节中,我们训练 25k 步。请注意,对每个通道进行规范化对于整流流训练非常重要。
73
+ 7. 时间注意力模块:我们添加时间注意力模块,其中没有初始化投影层。我们在图像上进行 3k 步训练。
74
+ 8. 仅针对具有掩码策略的视频的时间块:我们仅在视频上训练时间注意力块,步长为 38k。
75
+
76
+ 经过上述调整后,我们就可以开始在视频上训练模型了。上述调整保留了原始模型生成高质量图像的能力,并为后续的视频生成提供了许多助力:
77
+
78
+ - 通过整流流,我们可以加速训练,并将视频的采样步数从 100 步减少到 30 步,大大减少了推理的等待时间。
79
+ - 使用 qk-norm,训练更加稳定,并且可以使用积极的优化器。
80
+ - 采用新的VAE,时间维度压缩了4倍,使得训练更加高效。
81
+ - 该模型具有多分辨率图像生成能力,可以生成不同分辨率的视频。
82
+
83
+ ## 更多数据和更好的多阶段训练
84
+
85
+ 由于计算预算有限,我们精心安排了训练数据的质量从低到高,并将训练分为三个阶段。我们的训练涉及 12x8 GPU,总训练时间约为 2 周, 约70k步。
86
+
87
+ ### 第一阶段
88
+
89
+ 我们首先在 Webvid-10M 数据集(40k 小时)上训练模型,共 30k 步(2 个 epoch)。由于视频分辨率均低于 360p 且包含水印,因此我们首先在此数据集上进行训练。训练主要在 240p 和 360p 上进行,视频长度为 2s~16s。我们使用数据集中的原始字幕进行训练。训练配置位于[stage1.py](/configs/opensora-v1-2/train/stage1.py)中。
90
+
91
+ ### 第二阶段
92
+
93
+ 然后我们在 Panda-70M 数据集上训练模型。这个数据集很大,但质量参差不齐。我们使用官方的 30M 子集,其中的片段更加多样化,并过滤掉美学评分低于 4.5 的视频。这产生了一个 20M 子集,包含 41k 小时。数据集中的字幕直接用于我们的训练。训练配置位于[stage2.py](/configs/opensora-v1-2/train/stage2.py)中。
94
+
95
+ 训练主要在 360p 和 480p 上进行。我们训练模型 23k 步,即 0.5 个 epoch。训练尚未完成,因为我们希望我们的新模型能早日与大家见面。
96
+
97
+ ### 第三阶段
98
+
99
+ 在此阶段,我们从各种来源收集了 200 万个视频片段,总时长 5000 小时,其中包括:
100
+
101
+ - 来自 Pexels、Pixabay、Mixkit 等的免费授权视频。
102
+ - [MiraData](https://github.com/mira-space/MiraData):一个包含长视频的高质量数据集,主要来自游戏和城市/风景探索。
103
+ - [Vript](https://github.com/mutonix/Vript/tree/main):一个密集注释的数据集。
104
+ - 还有一些其他数据集。
105
+
106
+ MiraData 和 Vript 有来自 GPT 的字幕,其余数据我们使用[PLLaVA](https://github.com/magic-research/PLLaVA)生成字幕。与只能进行单帧/图像字幕的 LLaVA 相比,PLLaVA 是专门为视频字幕设计和训练的。[加速版PLLaVA](/tools/caption/README.md#pllava-captioning)已在我们的`tools/`中发布。在实践中,我们使用预训练的 PLLaVA 13B 模型,并从每个视频中选择 4 帧生成字幕,空间池化形状为 2x2。
107
+
108
+ 下面显示了此阶段使用的视频数据的一些统计数据。我们提供了持续时间和分辨率的基本统计数据,以及美学分数和光流分数分布。我们还从视频字幕中提取了对象和动作的标签并计算了它们的频率。
109
+ ![stats](/assets/readme/report-03_video_stats.png)
110
+ ![object_count](/assets/readme/report-03_objects_count.png)
111
+ ![object_count](/assets/readme/report-03_actions_count.png)
112
+
113
+ 此阶段我们主要在 720p 和 1080p 上进行训练,以提高模型在高清视频上的表现力。在训练中,我们使用的掩码率为25%。训练配置位于[stage3.py](/configs/opensora-v1-2/train/stage3.py)中。我们对模型进行 15k 步训练,大约为 2 个 epoch。
114
+
115
+ ## 简单有效的模型调节
116
+
117
+ 对于第 3 阶段,我们计算每个视频片段的美学分数和运动分数。但是,由于视频片段数量较少,我们不愿意过滤掉得分较低的片段,这会导致数据集较小。相反,我们将分数附加到字幕中并将其用作条件。我们发现这种方法可以让模型了解分数并遵循分数来生成质量更好的视频。
118
+
119
+ 例如,一段美学评分为 5.5、运动评分为 10 且检测到摄像头运动向左平移的视频,其字幕将为:
120
+
121
+ ```plaintext
122
+ [Original Caption] aesthetic score: 5.5, motion score: 10, camera motion: pan left.
123
+ ```
124
+
125
+ 在推理过程中,我们还可以使用分数来调节模型。对于摄像机运动,我们仅标记了 13k 个具有高置信度的剪辑,并且摄像机运动检测模块已在我们的工具中发布。
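+
+ 在实现上,这只是把各项分数按固定格式拼接到字幕末尾,例如(示意代码):
+
+ ```python
+ def append_scores(caption: str, aes=None, motion=None, camera=None) -> str:
+     """把美学分、运动分和相机运动标签拼接到字幕末尾,推理时可用同样的格式做条件。"""
+     tags = []
+     if aes is not None:
+         tags.append(f"aesthetic score: {aes}")
+     if motion is not None:
+         tags.append(f"motion score: {motion}")
+     if camera is not None:
+         tags.append(f"camera motion: {camera}")
+     return f"{caption} {', '.join(tags)}." if tags else caption
+ ```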
126
+
127
+ ## 评估
128
+
129
+ 之前,我们仅通过人工评估来监控训练过程,因为 DDPM 训练损失与生成的视频质量没有很好的相关性。但是,对于整流流,如 SD3 中所述,我们发现训练损失与生成的视频质量有很好的相关性。因此,我们跟踪了 100 张图像和 1k 个视频的整流流评估损失。
130
+
131
+ 我们从 pixabay 中抽样了 1k 个视频作为验证数据集。我们计算了不同分辨率(144p、240p、360p、480p、720p)下图像和不同长度的视频(2s、4s、8s、16s)的评估损失。对于每个设置,我们等距采样 10 个时间步长。然后对所有损失取平均值。
132
+
133
+ ![Evaluation Loss](/assets/readme/report_val_loss.png)
134
+ ![Video Evaluation Loss](/assets/readme/report_vid_val_loss.png)
135
+
136
+ 此外,我们还会在训练过程中跟踪[VBench](https://vchitect.github.io/VBench-project/)得分。VBench 是用于短视频生成的自动视频评估基准。我们用 240p 2s 视频计算 vbench 得分。这两个指标验证了我们的模型在训练过程中持续改进。
137
+
138
+ ![VBench](/assets/readme/report_vbench_score.png)
139
+
140
+ 所有评估代码均发布在`eval`文件夹中。查看[评估指南](/eval/README.md)了解更多详细信息。
141
+
142
+ | 模型 | 总得分 | 质量得分 | 语义得分 |
143
+ | -------------- | ----------- | ------------- | -------------- |
144
+ | Open-Sora V1.0 | 75.91% | 78.81% | 64.28% |
145
+ | Open-Sora V1.2 | 79.23% | 80.71% | 73.30% |
146
+
147
+ ## 序列并行
148
+
149
+ 我们使用序列并行来支持长序列训练和推理。我们的实现基于Ulysses,工作流程如下所示。启用序列并行后,我们只需要将 `all-to-all` 通信应用于STDiT中的空间模块(spatial block),因为在序列维度上,只有对空间信息的计算是相互依赖的。
150
+
151
+ ![SP](/assets/readme/sequence_parallelism.jpeg)
152
+
153
+ 目前,由于训练数据分辨率较小,我们尚未使用序列并行进行训练,我们计划在下一个版本中使用。至于推理,我们可以使用序列并行,以防您的 GPU 内存不足。下表显示,序列并行可以实现加速:
154
+
155
+ | 分辨率 | 时长 | GPU数量 | 是否启用序列并行 | 用时(秒) | 加速效果/GPU |
156
+ | ---------- | ------- | -------------- | --------- | ------------ | --------------- |
157
+ | 720p | 16秒 | 1 | 否 | 547.97 | - |
158
+ | 720p | 16秒 | 2 | 是 | 244.38 | 12% |
159
+
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/structure.md ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 代码仓库和配置文件结构
2
+
3
+ ## 代码仓库结构
4
+
5
+ ```plaintext
6
+ Open-Sora
7
+ ├── README.md
8
+ ├── docs
9
+ │ ├── acceleration.md -> Acceleration & Speed benchmark
10
+ │ ├── command.md -> Commands for training & inference
11
+ │ ├── datasets.md -> Datasets used in this project
12
+ │ ├── structure.md -> This file
13
+ │ └── report_v1.md -> Report for Open-Sora v1
14
+ ├── scripts
15
+ │ ├── train.py -> diffusion training script
16
+ │ └── inference.py -> diffusion inference script
17
+ ├── configs -> Configs for training & inference
18
+ ├── opensora
19
+ │ ├── __init__.py
20
+ │ ├── registry.py -> Registry helper
21
+ │   ├── acceleration -> Acceleration related code
22
+ │   ├── dataset -> Dataset related code
23
+ │   ├── models
24
+ │   │   ├── layers -> Common layers
25
+ │   │   ├── vae -> VAE as image encoder
26
+ │   │   ├── text_encoder -> Text encoder
27
+ │   │   │   ├── classes.py -> Class id encoder (inference only)
28
+ │   │   │   ├── clip.py -> CLIP encoder
29
+ │   │   │   └── t5.py -> T5 encoder
30
+ │   │   ├── dit
31
+ │   │   ├── latte
32
+ │   │   ├── pixart
33
+ │   │   └── stdit -> Our STDiT related code
34
+ │   ├── schedulers -> Diffusion schedulers
35
+ │   │   ├── iddpm -> IDDPM for training and inference
36
+ │   │ └── dpms -> DPM-Solver for fast inference
37
+ │ └── utils
38
+ └── tools -> Tools for data processing and more
39
+ ```
40
+
41
+ ## 配置文件结构
42
+
43
+
44
+ 我们的配置文件遵循 [MMEngine](https://github.com/open-mmlab/mmengine)。MMEngine 会读取配置文件(`.py` 文件)并将其解析为类似字典的对象。
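+
+ 例如,可以这样读取并访问一个配置文件(示意;`Config` 来自 mmengine 的配置模块):
+
+ ```python
+ from mmengine.config import Config
+
+ cfg = Config.fromfile("configs/opensora/train/16x256x256.py")
+ print(cfg.num_frames, cfg.model["type"])  # 既可以按属性访问,也可以像字典一样访问
+ ```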
45
+
46
+ ```plaintext
47
+ Open-Sora
48
+ └── configs -> Configs for training & inference
49
+ ├── opensora -> STDiT related configs
50
+ │ ├── inference
51
+ │ │ ├── 16x256x256.py -> Sample videos 16 frames 256x256
52
+ │ │ ├── 16x512x512.py -> Sample videos 16 frames 512x512
53
+ │ │ └── 64x512x512.py -> Sample videos 64 frames 512x512
54
+ │ └── train
55
+ │ ├── 16x256x256.py -> Train on videos 16 frames 256x256
56
+ │ ├── 16x512x512.py -> Train on videos 16 frames 512x512
57
+ │ └── 64x512x512.py -> Train on videos 64 frames 512x512
58
+ ├── dit -> DiT related configs
59
+    │   ├── inference
60
+    │   │   ├── 1x256x256-class.py -> Sample images with ckpts from DiT
61
+    │   │   ├── 1x256x256.py -> Sample images with clip condition
62
+    │   │   └── 16x256x256.py -> Sample videos
63
+    │   └── train
64
+    │     ├── 1x256x256.py -> Train on images with clip condition
65
+    │      └── 16x256x256.py -> Train on videos
66
+ ├── latte -> Latte related configs
67
+ └── pixart -> PixArt related configs
68
+ ```
69
+
70
+ ## 推理配置演示
71
+
72
+ 要更改推理设置,可以直接修改相应的配置文件,或者通过传递参数来覆盖配置文件中的设置([config_utils.py](/opensora/utils/config_utils.py))。要更改采样提示词,您应该修改传递给 `--prompt_path` 参数的 `.txt` 文件。
73
+
74
+ ```plaintext
75
+ --prompt_path ./assets/texts/t2v_samples.txt -> prompt_path
76
+ --ckpt-path ./path/to/your/ckpt.pth -> model["from_pretrained"]
77
+ ```
78
+
79
+ 下面提供了每个字段的解释。
80
+
81
+ ```python
82
+ # Define sampling size
83
+ num_frames = 64 # number of frames
84
+ fps = 24 // 2 # frames per second (divided by 2 for frame_interval=2)
85
+ image_size = (512, 512) # image size (height, width)
86
+
87
+ # Define model
88
+ model = dict(
89
+ type="STDiT-XL/2", # Select model type (STDiT-XL/2, DiT-XL/2, etc.)
90
+ space_scale=1.0, # (Optional) Space positional encoding scale (new height / old height)
91
+ time_scale=2 / 3, # (Optional) Time positional encoding scale (new frame_interval / old frame_interval)
92
+ enable_flash_attn=True, # (Optional) Speed up training and inference with flash attention
93
+ enable_layernorm_kernel=True, # (Optional) Speed up training and inference with fused kernel
94
+ from_pretrained="PRETRAINED_MODEL", # (Optional) Load from pretrained model
95
+ no_temporal_pos_emb=True, # (Optional) Disable temporal positional encoding (for image)
96
+ )
97
+ vae = dict(
98
+ type="VideoAutoencoderKL", # Select VAE type
99
+ from_pretrained="stabilityai/sd-vae-ft-ema", # Load from pretrained VAE
100
+ micro_batch_size=128, # VAE with micro batch size to save memory
101
+ )
102
+ text_encoder = dict(
103
+ type="t5", # Select text encoder type (t5, clip)
104
+ from_pretrained="DeepFloyd/t5-v1_1-xxl", # Load from pretrained text encoder
105
+ model_max_length=120, # Maximum length of input text
106
+ )
107
+ scheduler = dict(
108
+ type="iddpm", # Select scheduler type (iddpm, dpm-solver)
109
+ num_sampling_steps=100, # Number of sampling steps
110
+ cfg_scale=7.0, # hyper-parameter for classifier-free diffusion
111
+ )
112
+ dtype = "fp16" # Computation type (fp16, fp32, bf16)
113
+
114
+ # Other settings
115
+ batch_size = 1 # batch size
116
+ seed = 42 # random seed
117
+ prompt_path = "./assets/texts/t2v_samples.txt" # path to prompt file
118
+ save_dir = "./samples" # path to save samples
119
+ ```
120
+
121
+ ## 训练配置演示
122
+
123
+ ```python
124
+ # Define sampling size
125
+ num_frames = 64
126
+ frame_interval = 2 # sample every 2 frames
127
+ image_size = (512, 512)
128
+
129
+ # Define dataset
130
+ root = None # root path to the dataset
131
+ data_path = "CSV_PATH" # path to the csv file
132
+ use_image_transform = False # True if training on images
133
+ num_workers = 4 # number of workers for dataloader
134
+
135
+ # Define acceleration
136
+ dtype = "bf16" # Computation type (fp16, bf16)
137
+ grad_checkpoint = True # Use gradient checkpointing
138
+ plugin = "zero2" # Plugin for distributed training (zero2, zero2-seq)
139
+ sp_size = 1 # Sequence parallelism size (1 for no sequence parallelism)
140
+
141
+ # Define model
142
+ model = dict(
143
+ type="STDiT-XL/2",
144
+ space_scale=1.0,
145
+ time_scale=2 / 3,
146
+ from_pretrained="YOUR_PRETRAINED_MODEL",
147
+ enable_flash_attn=True, # Enable flash attention
148
+ enable_layernorm_kernel=True, # Enable layernorm kernel
149
+ )
150
+ vae = dict(
151
+ type="VideoAutoencoderKL",
152
+ from_pretrained="stabilityai/sd-vae-ft-ema",
153
+ micro_batch_size=128,
154
+ )
155
+ text_encoder = dict(
156
+ type="t5",
157
+ from_pretrained="DeepFloyd/t5-v1_1-xxl",
158
+ model_max_length=120,
159
+ shardformer=True, # Enable shardformer for T5 acceleration
160
+ )
161
+ scheduler = dict(
162
+ type="iddpm",
163
+ timestep_respacing="", # Default 1000 timesteps
164
+ )
165
+
166
+ # Others
167
+ seed = 42
168
+ outputs = "outputs" # path to save checkpoints
169
+ wandb = False # Use wandb for logging
170
+
171
+ epochs = 1000 # number of epochs (just large enough, kill when satisfied)
172
+ log_every = 10
173
+ ckpt_every = 250
174
+ load = None # path to resume training
175
+
176
+ batch_size = 4
177
+ lr = 2e-5
178
+ grad_clip = 1.0 # gradient clipping
179
+ ```
exp_code/1_benchmark/Open-Sora_v12/docs/zh_CN/vae.md ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # VAE 技术报告
2
+
3
+ 由于 [Pixart-Sigma](https://arxiv.org/abs/2403.04692) 论文中指出适应新的VAE很简单,因此我们开发了一个额外的时间VAE。
4
+ 具体而言,我们的 VAE 由一个[空间 VAE](https://huggingface.co/PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers)和一个时间 VAE 级联组成。
5
+ 对于时间VAE,我们遵循 [MAGVIT-v2](https://arxiv.org/abs/2310.05737)的实现, 并做了以下修改:
6
+
7
+ * 我们删除了码本特有的架构。
8
+ * 我们不使用鉴别器(discriminator),而是使用 VAE 重建损失、KL 损失和感知损失进行训练。
9
+ * 在编码器的最后一个线性层中,我们缩小到 4 通道的对角高斯分布,遵循我们之前训练的接受 4 通道输入的 STDiT。
10
+ * 我们的解码器与编码器架构对称。
11
+
12
+ ## 训练
13
+ 我们分不同阶段训练模型。
14
+
15
+ 我们首先在单台机器(8 个 GPU)上冻结空间 VAE,训练时间 VAE 共 380k 步。我们使用额外的 identity loss 使 3D VAE 的特征与 2D VAE 的特征相似。我们使用 20% 的图像和 80% 的视频(17 帧)来训练 VAE。
16
+
17
+ ```bash
18
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage1.py --data-path YOUR_CSV_PATH
19
+ ```
20
+
21
+ 接下来,我们移除身份损失并训练 3D VAE 管道以重建 260k 步的 2D 压缩视频。
22
+
23
+ ```bash
24
+ torchrun --nnodes=1 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage2.py --data-path YOUR_CSV_PATH
25
+ ```
26
+
27
+ 最后,我们移除了对 2D 压缩特征的重建损失,转而训练整个 VAE 管道直接重建原始视频,共 540k 步。我们使用 34 帧以内的随机帧数训练 VAE,使其对不同长度的视频更具鲁棒性。此阶段在 24 个 GPU 上进行训练。
28
+
29
+ ```bash
30
+ torchrun --nnodes=3 --nproc_per_node=8 scripts/train_vae.py configs/vae/train/stage3.py --data-path YOUR_CSV_PATH
31
+ ```
32
+
33
+ 请注意,您需要根据自己的 csv 数据大小相应地调整配置文件中的 `epochs` 。
34
+
35
+ ## 推理
36
+
37
+ 为了直观地检查 VAE 的性能,您可以运行以下推理。它使用 `_ori` 后缀(即 `"YOUR_VIDEO_DIR"_ori`)将原始视频保存到您指定的视频目录中,使用`_rec`后缀(即`"YOUR_VIDEO_DIR"_rec`)将来自完整管道的重建视频保存到指定的视频目录中,并使用 `_spatial`后缀(即`"YOUR_VIDEO_DIR"_spatial`)将来自 2D 压缩和解压缩的重建视频保存到指定的视频目录中。
38
+
39
+ ```bash
40
+ torchrun --standalone --nnodes=1 --nproc_per_node=1 scripts/inference_vae.py configs/vae/inference/video.py --ckpt-path YOUR_VAE_CKPT_PATH --data-path YOUR_CSV_PATH --save-dir YOUR_VIDEO_DIR
41
+ ```
42
+ ## 评估
43
+ 然后,我们可以计算 VAE 在 SSIM、PSNR、LPIPS 和 FLOLPIPS 指标上的表现得分。
44
+
45
+ * SSIM: 结构相似性指数度量,越高越好
46
+ * PSNR: 峰值信噪比,越高越好
47
+ * LPIPS: 学习感知图像质量下降,越低越好
48
+ * [FloLPIPS](https://arxiv.org/pdf/2207.08119): 带有视频插值的LPIPS,越低越好。
49
+
50
+ ```bash
51
+ python eval/vae/eval_common_metric.py --batch_size 2 --real_video_dir YOUR_VIDEO_DIR_ori --generated_video_dir YOUR_VIDEO_DIR_rec --device cuda --sample_fps 24 --crop_size 256 --resolution 256 --num_frames 17 --sample_rate 1 --metric ssim psnr lpips flolpips
52
+ ```
53
+
54
+ ## 致谢
55
+ 我们非常感谢以下工作:
56
+ * [MAGVIT-v2](https://arxiv.org/abs/2310.05737): Language Model Beats Diffusion -- Tokenizer is Key to Visual Generation
57
+ * [Taming Transformers](https://github.com/CompVis/taming-transformers): Taming Transformers for High-Resolution Image Synthesis
58
+ * [3D blur pooling](https://github.com/adobe/antialiased-cnns/pull/39/commits/3d6f02b6943c58b68c19c07bc26fad57492ff3bc)
59
+ * [Open-Sora-Plan](https://github.com/PKU-YuanGroup/Open-Sora-Plan)
exp_code/1_benchmark/Open-Sora_v12/eval/README.md ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Evaluation
2
+
3
+ ## Human evaluation
4
+
5
+ To conduct human evaluation, we need to generate various samples. We provide many prompts in `assets/texts`, and define several test settings covering different resolutions, durations and aspect ratios in `eval/sample.sh`. To facilitate the use of multiple GPUs, we split the sampling tasks into several parts.
6
+
7
+ ```bash
8
+ # image (1)
9
+ bash eval/sample.sh /path/to/ckpt num_frames model_name_for_log -1
10
+ # video (2a 2b 2c ...)
11
+ bash eval/sample.sh /path/to/ckpt num_frames model_name_for_log -2a
12
+ # launch 8 jobs at once (you must read the script to understand the details)
13
+ bash eval/human_eval/launch.sh /path/to/ckpt num_frames model_name_for_log
14
+ ```
15
+
16
+ ## Rectified Flow Loss
17
+
18
+ Evaluate the rectified flow loss with the following commands.
19
+
20
+ ```bash
21
+ # image
22
+ torchrun --standalone --nproc_per_node 1 eval/loss/eval_loss.py configs/opensora-v1-2/misc/eval_loss.py --data-path /path/to/img.csv --ckpt-path /path/to/ckpt
23
+
24
+ # video
25
+ torchrun --standalone --nproc_per_node 1 eval/loss/eval_loss.py configs/opensora-v1-2/misc/eval_loss.py --data-path /path/to/vid.csv --ckpt-path /path/to/ckpt
26
+
27
+ # select resolution
28
+ torchrun --standalone --nproc_per_node 1 eval/loss/eval_loss.py configs/opensora-v1-2/misc/eval_loss.py --data-path /path/to/vid.csv --ckpt-path /path/to/ckpt --resolution 720p
29
+ ```
30
+
31
+ To launch multiple jobs at once, use the following script.
32
+
33
+ ```bash
34
+ bash eval/loss/launch.sh /path/to/ckpt model_name
35
+ ```
36
+
37
+ To obtain an organized list of scores:
38
+ ```bash
39
+ python eval/loss/tabulate_rl_loss.py --log_dir path/to/log/dir
40
+ ```
41
+
42
+ ## VBench
43
+
44
+ [VBench](https://github.com/Vchitect/VBench) is a benchmark for short text to video generation. We provide a script for easily generating samples required by VBench.
45
+
46
+ First, generate the relevant videos with the following commands:
47
+
48
+ ```bash
49
+ # vbench task; to evaluate all prompts, set start_index to 0 and end_index to 2000
50
+ bash eval/sample.sh /path/to/ckpt num_frames model_name_for_log -4 start_index end_index
51
+
52
+ # Alternatively, launch 8 jobs at once (you must read the script to understand the details)
53
+ bash eval/vbench/launch.sh /path/to/ckpt num_frames model_name
54
+
55
+ # in addition, you can specify resolution, aspect ratio, sampling steps, flow, and llm-refine
56
+ bash eval/vbench/launch.sh /path/to/ckpt num_frames model_name res_value aspect_ratio_value steps_value flow_value llm_refine_value
57
+ # for example
58
+ # bash eval/vbench/launch.sh /mnt/jfs-hdd/sora/checkpoints/outputs/042-STDiT3-XL-2/epoch1-global_step16200_llm_refine/ema.pt 51 042-STDiT3-XL-2 240p 9:16 30 2 True
59
+ ```
60
+
61
+ After generation, install the VBench package following the "Evaluation Dependencies" section of our [installation guide](../docs/installation.md). Then, run the following commands to evaluate the generated samples.
62
+
63
+ <!-- ```bash
64
+ bash eval/vbench/vbench.sh /path/to/video_folder /path/to/model/ckpt
65
+ ``` -->
66
+
67
+ ```bash
68
+ python eval/vbench/calc_vbench.py /path/to/video_folder /path/to/model/ckpt
69
+ ```
70
+
71
+ Finally, we obtain the scaled scores for the model by:
72
+ ```bash
73
+ python eval/vbench/tabulate_vbench_scores.py --score_dir path/to/score/dir
74
+ ```
75
+
76
+ ## VBench-i2v
77
+
78
+ [VBench-i2v](https://github.com/Vchitect/VBench/tree/master/vbench2_beta_i2v) is a benchmark for short image to video generation (beta version).
79
+ Similarly, install the VBench package following the "Evaluation Dependencies" section of our [installation guide](../docs/installation.md).
80
+
81
+ ```bash
82
+ # Step 1: generate the relevant videos
83
+ # vbench i2v tasks; to evaluate all prompts, set start_index to 0 and end_index to 2000
84
+ bash eval/sample.sh /path/to/ckpt num_frames model_name_for_log -5 start_index end_index
85
+ # Alternatively, launch 8 jobs at once
86
+ bash eval/vbench_i2v/launch.sh /path/to/ckpt num_frames model_name
87
+
88
+ # Step 2: run vbench to evaluate the generated samples
89
+ python eval/vbench_i2v/vbench_i2v.py /path/to/video_folder /path/to/model/ckpt
90
+ # Note that you may need to go to `VBench/vbench2_beta_i2v/utils.py` and change the hard-coded var `image_root` in the `load_i2v_dimension_info` function to your corresponding image folder.
91
+
92
+ # Step 3: obtain the scaled scores
93
+ python eval/vbench_i2v/tabulate_vbench_i2v_scores.py path/to/videos/folder path/to/your/model/ckpt
94
+ # this will store the results under `eval/vbench_i2v` in the path/to/your/model/ckpt
95
+
96
+ ```
97
+
98
+ Similarly to VBench, you can specify the resolution, aspect ratio, sampling steps, flow, and llm-refine:
99
+
100
+ ```bash
101
+ bash eval/vbench_i2v/launch.sh /path/to/ckpt num_frames model_name_for_log res_value aspect_ratio_value steps_value flow_value llm_refine_value
102
+ # for example
103
+ # bash eval/vbench_i2v/launch.sh /mnt/jfs-hdd/sora/checkpoints/outputs/042-STDiT3-XL-2/epoch1-global_step16200_llm_refine/ema.pt 51 042-STDiT3-XL-2 240p 9:16 30 2 True
104
+ # if no flow control, use "None" instead
105
+ ```
106
+
107
+ ## VAE
108
+
109
+ Install the dependencies following the "Evaluation Dependencies" section of our [installation guide](../docs/installation.md). Then, run the following evaluation command:
110
+
111
+ ```bash
112
+ # metric can be any one of, or a list of: ssim, psnr, lpips, flolpips
113
+ python eval/vae/eval_common_metric.py --batch_size 2 --real_video_dir path/to/original/videos --generated_video_dir path/to/generated/videos --device cuda --sample_fps 24 --crop_size 256 --resolution 256 --num_frames 17 --sample_rate 1 --metric ssim psnr lpips flolpips
114
+ ```
exp_code/1_benchmark/Open-Sora_v12/eval/human_eval/generate.sh ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ set -x
4
+ set -e
5
+
6
+ TEXT_PATH=/home/data/sora_data/pixart-sigma-generated/text.txt
7
+ OUTPUT_PATH=/home/data/sora_data/pixart-sigma-generated/raw
8
+ CMD="python scripts/inference.py configs/pixart/inference/1x2048MS.py"
9
+ # LOG_BASE=logs/sample/generate
10
+ LOG_BASE=$(dirname $CKPT)/eval/generate # NOTE: CKPT is expected to be exported in the environment before running this script
11
+ mkdir -p ${LOG_BASE}
12
+ NUM_PER_GPU=10000
13
+ N_LAUNCH=2
14
+ NUM_START=$(($N_LAUNCH * $NUM_PER_GPU * 8))
15
+
16
+ CUDA_VISIBLE_DEVICES=0 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 0)) --end-index $(($NUM_START + $NUM_PER_GPU * 1)) --image-size 2048 2048 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_1.log 2>&1 &
17
+ CUDA_VISIBLE_DEVICES=1 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 1)) --end-index $(($NUM_START + $NUM_PER_GPU * 2)) --image-size 1408 2816 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_2.log 2>&1 &
18
+ CUDA_VISIBLE_DEVICES=2 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 2)) --end-index $(($NUM_START + $NUM_PER_GPU * 3)) --image-size 2816 1408 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_3.log 2>&1 &
19
+ CUDA_VISIBLE_DEVICES=3 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 3)) --end-index $(($NUM_START + $NUM_PER_GPU * 4)) --image-size 1664 2304 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_4.log 2>&1 &
20
+ CUDA_VISIBLE_DEVICES=4 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 4)) --end-index $(($NUM_START + $NUM_PER_GPU * 5)) --image-size 2304 1664 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_5.log 2>&1 &
21
+ CUDA_VISIBLE_DEVICES=5 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 5)) --end-index $(($NUM_START + $NUM_PER_GPU * 6)) --image-size 1536 2560 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_6.log 2>&1 &
22
+ CUDA_VISIBLE_DEVICES=6 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 6)) --end-index $(($NUM_START + $NUM_PER_GPU * 7)) --image-size 2560 1536 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_7.log 2>&1 &
23
+ CUDA_VISIBLE_DEVICES=7 $CMD --prompt-path $TEXT_PATH --save-dir $OUTPUT_PATH --start-index $(($NUM_START + $NUM_PER_GPU * 7)) --end-index $(($NUM_START + $NUM_PER_GPU * 8)) --image-size 2048 2048 --verbose 1 --batch-size 2 >${LOG_BASE}/${N_LAUNCH}_8.log 2>&1 &
exp_code/1_benchmark/Open-Sora_v12/eval/human_eval/launch.sh ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ CKPT=$1
4
+ NUM_FRAMES=$2
5
+ MODEL_NAME=$3
6
+
7
+ if [[ $CKPT == *"ema"* ]]; then
8
+ parentdir=$(dirname $CKPT)
9
+ CKPT_BASE=$(basename $parentdir)_ema
10
+ else
11
+ CKPT_BASE=$(basename $CKPT)
12
+ fi
13
+ LOG_BASE=$(dirname $CKPT)/eval
14
+ mkdir -p ${LOG_BASE}
15
+ echo "Logging to $LOG_BASE"
16
+
17
+ GPUS=(0 1 2 3 4 5 6 7)
18
+ # TASK_ID_LIST=(1 2a 2b 2c 2d 2e 2f 2g) # move image to video task
19
+ TASK_ID_LIST=(2a 2b 2c 2d 2e 2f 2g 2h)
20
+ # FRAME_LIST=(1 $NUM_FRAMES $NUM_FRAMES $NUM_FRAMES $NUM_FRAMES $NUM_FRAMES $NUM_FRAMES $NUM_FRAMES)
21
+
22
+ for i in "${!GPUS[@]}"; do
23
+ CUDA_VISIBLE_DEVICES=${GPUS[i]} bash eval/sample.sh $CKPT $NUM_FRAMES $MODEL_NAME -${TASK_ID_LIST[i]} >${LOG_BASE}/${TASK_ID_LIST[i]}.log 2>&1 &
24
+ done
25
+
26
+ # kill all by: pkill -f "inference"
exp_code/1_benchmark/Open-Sora_v12/eval/loss/eval_loss.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pprint import pformat
2
+
3
+ import colossalai
4
+ import torch
5
+ import torch.distributed as dist
6
+ from colossalai.cluster import DistCoordinator
7
+ from mmengine.runner import set_random_seed
8
+ from tqdm import tqdm
9
+
10
+ from opensora.acceleration.parallel_states import get_data_parallel_group, set_data_parallel_group
11
+ from opensora.datasets.dataloader import prepare_dataloader
12
+ from opensora.registry import DATASETS, MODELS, SCHEDULERS, build_module
13
+ from opensora.utils.config_utils import parse_configs
14
+ from opensora.utils.misc import create_logger, to_torch_dtype
15
+ from opensora.utils.train_utils import MaskGenerator
16
+
17
+
18
+ def main():
19
+ torch.set_grad_enabled(False)
20
+ # ======================================================
21
+ # configs & runtime variables
22
+ # ======================================================
23
+ # == parse configs ==
24
+ cfg = parse_configs(training=False)
25
+
26
+ # == device and dtype ==
27
+ device = "cuda" if torch.cuda.is_available() else "cpu"
28
+ cfg_dtype = cfg.get("dtype", "fp32")
29
+ assert cfg_dtype in ["fp16", "bf16", "fp32"], f"Unknown mixed precision {cfg_dtype}"
30
+ dtype = to_torch_dtype(cfg.get("dtype", "bf16"))
31
+ torch.backends.cuda.matmul.allow_tf32 = True
32
+ torch.backends.cudnn.allow_tf32 = True
33
+
34
+ # == init distributed env ==
35
+ colossalai.launch_from_torch({})
36
+ DistCoordinator()
37
+ set_random_seed(seed=cfg.get("seed", 1024))
38
+ set_data_parallel_group(dist.group.WORLD)
39
+
40
+ # == init logger ==
41
+ logger = create_logger()
42
+ logger.info("Eval loss configuration:\n %s", pformat(cfg.to_dict()))
43
+
44
+ # ======================================================
45
+ # build model & load weights
46
+ # ======================================================
47
+ logger.info("Building models...")
48
+ # == build text-encoder and vae ==
49
+ text_encoder = build_module(cfg.text_encoder, MODELS, device=device)
50
+ vae = build_module(cfg.vae, MODELS).to(device, dtype).eval()
51
+
52
+ # == build diffusion model ==
53
+ input_size = (None, None, None)
54
+ latent_size = vae.get_latent_size(input_size)
55
+ model = (
56
+ build_module(
57
+ cfg.model,
58
+ MODELS,
59
+ input_size=latent_size,
60
+ in_channels=vae.out_channels,
61
+ caption_channels=text_encoder.output_dim,
62
+ model_max_length=text_encoder.model_max_length,
63
+ )
64
+ .to(device, dtype)
65
+ .eval()
66
+ )
67
+ text_encoder.y_embedder = model.y_embedder # HACK: for classifier-free guidance
68
+
69
+ # == build scheduler ==
70
+ scheduler = build_module(cfg.scheduler, SCHEDULERS)
71
+
72
+ if cfg.get("mask_ratios", None) is not None:
73
+ mask_generator = MaskGenerator(cfg.mask_ratios)
74
+
75
+ # ======================================================
76
+ # inference
77
+ # ======================================================
78
+ # start evaluation, prepare a dataset everytime in the loop
79
+ bucket_config = cfg.bucket_config
80
+ if cfg.get("resolution", None) is not None:
81
+ bucket_config = {cfg.resolution: bucket_config[cfg.resolution]}
82
+ assert bucket_config is not None, "bucket_config is required for evaluation"
83
+ logger.info("Evaluating bucket_config: %s", bucket_config)
84
+
85
+ def build_dataset(resolution, num_frames, batch_size):
86
+ bucket_config = {resolution: {num_frames: (1.0, batch_size)}}
87
+ dataset = build_module(cfg.dataset, DATASETS)
88
+ dataloader_args = dict(
89
+ dataset=dataset,
90
+ batch_size=None,
91
+ num_workers=cfg.num_workers,
92
+ shuffle=False,
93
+ drop_last=False,
94
+ pin_memory=True,
95
+ process_group=get_data_parallel_group(),
96
+ )
97
+ dataloader, sampler = prepare_dataloader(bucket_config=bucket_config, **dataloader_args)
98
+ num_batch = sampler.get_num_batch()
99
+ num_steps_per_epoch = num_batch // dist.get_world_size()
100
+ return dataloader, num_steps_per_epoch, num_batch
101
+
102
+ evaluation_losses = {}
103
+ start = cfg.start_index if "start_index" in cfg else 0
104
+ end = cfg.end_index if "end_index" in cfg else len(bucket_config)
105
+ for i, res in enumerate(bucket_config):
106
+ if i < start or i >= end: # skip task
107
+ continue
108
+
109
+ t_bucket = bucket_config[res]
110
+ for num_frames, (_, batch_size) in t_bucket.items():
111
+ if batch_size is None:
112
+ continue
113
+ logger.info("Evaluating resolution: %s, num_frames: %s", res, num_frames)
114
+ dataloader, num_steps_per_epoch, num_batch = build_dataset(res, num_frames, batch_size)
115
+ if num_batch == 0:
116
+ logger.warning("No data for resolution: %s, num_frames: %s", res, num_frames)
117
+ continue
118
+
119
+ evaluation_t_losses = []
120
+ for t in torch.linspace(0, scheduler.num_timesteps, cfg.get("num_eval_timesteps", 10) + 2)[1:-1]:
121
+ loss_t = 0.0
122
+ num_samples = 0
123
+ dataloader_iter = iter(dataloader)
124
+ for _ in tqdm(range(num_steps_per_epoch), desc=f"res: {res}, num_frames: {num_frames}, t: {t:.2f}"):
125
+ batch = next(dataloader_iter)
126
+ x = batch.pop("video").to(device, dtype)
127
+ y = batch.pop("text")
128
+ x = vae.encode(x)
129
+ model_args = text_encoder.encode(y)
130
+
131
+ # == mask ==
132
+ mask = None
133
+ if cfg.get("mask_ratios", None) is not None:
134
+ mask = mask_generator.get_masks(x)
135
+ model_args["x_mask"] = mask
136
+
137
+ # == video meta info ==
138
+ for k, v in batch.items():
139
+ model_args[k] = v.to(device, dtype)
140
+
141
+ # == diffusion loss computation ==
142
+ timestep = torch.tensor([t] * x.shape[0], device=device, dtype=dtype)
143
+ loss_dict = scheduler.training_losses(model, x, model_args, mask=mask, t=timestep)
144
+ losses = loss_dict["loss"] # (batch_size)
145
+ num_samples += x.shape[0]
146
+ loss_t += losses.sum().item()
147
+ loss_t /= num_samples
148
+ evaluation_t_losses.append(loss_t)
149
+ logger.info("resolution: %s, num_frames: %s, timestep: %.2f, loss: %.4f", res, num_frames, t, loss_t)
150
+
151
+ evaluation_losses[(res, num_frames)] = sum(evaluation_t_losses) / len(evaluation_t_losses)
152
+ logger.info(
153
+ "Evaluation losses for resolution: %s, num_frames: %s, loss: %s\n %s",
154
+ res,
155
+ num_frames,
156
+ evaluation_losses[(res, num_frames)],
157
+ evaluation_t_losses,
158
+ )
159
+ logger.info("Evaluation losses: %s", evaluation_losses)
160
+
161
+
162
+ if __name__ == "__main__":
163
+ main()
exp_code/1_benchmark/Open-Sora_v12/eval/loss/launch.sh ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ CMD="torchrun --standalone --nproc_per_node 1 eval/loss/eval_loss.py configs/opensora-v1-2/misc/eval_loss.py"
4
+ CKPT_PATH=$1
5
+ MODEL_NAME=$2
6
+ IMG_PATH=$3
7
+ VID_PATH=$4
8
+
9
+ if [ -z $IMG_PATH ]; then
10
+ IMG_PATH="/mnt/jfs-hdd/sora/meta/validation/img_1k.csv"
11
+ fi
12
+
13
+ if [ -z $VID_PATH ]; then
14
+ VID_PATH="/mnt/jfs-hdd/sora/meta/validation/vid_100.csv"
15
+ fi
16
+
17
+ if [[ $CKPT_PATH == *"ema"* ]]; then
18
+ parentdir=$(dirname $CKPT_PATH)
19
+ CKPT_BASE=$(basename $parentdir)_ema
20
+ else
21
+ CKPT_BASE=$(basename $CKPT_PATH)
22
+ fi
23
+ LOG_BASE=$(dirname $CKPT_PATH)/eval
24
+ mkdir -p $LOG_BASE
25
+ echo "Logging to $LOG_BASE"
26
+
27
+
28
+ GPUS=(3 4 5 6 7)
29
+ RESOLUTION=(144p 240p 360p 480p 720p)
30
+
31
+ CUDA_VISIBLE_DEVICES=0 $CMD --data-path $IMG_PATH --ckpt-path $CKPT_PATH --start-index 0 --end-index 5 >${LOG_BASE}/img_0.log 2>&1 &
32
+ CUDA_VISIBLE_DEVICES=1 $CMD --data-path $IMG_PATH --ckpt-path $CKPT_PATH --start-index 5 --end-index 6 >${LOG_BASE}/img_1.log 2>&1 &
33
+ CUDA_VISIBLE_DEVICES=2 $CMD --data-path $IMG_PATH --ckpt-path $CKPT_PATH --start-index 6 >${LOG_BASE}/img_2.log 2>&1 &
34
+
35
+
36
+ for i in "${!GPUS[@]}"; do
37
+ CUDA_VISIBLE_DEVICES=${GPUS[i]} $CMD --data-path $VID_PATH --ckpt-path $CKPT_PATH --resolution ${RESOLUTION[i]} >${LOG_BASE}/${RESOLUTION[i]}_vid.log 2>&1 &
38
+ done
exp_code/1_benchmark/Open-Sora_v12/eval/loss/tabulate_rl_loss.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ usage:
3
+ python tabulate_rl_loss.py --log_dir /home/zhengzangwei/projs/Open-Sora-dev/logs/loss
4
+
5
+ saves the processed json to:
6
+ <log_dir>/loss.json
7
+ """
8
+
9
+ import argparse
10
+ import json
11
+ import os
12
+ from ast import literal_eval
13
+
14
+
15
+ def parse_args():
16
+ parser = argparse.ArgumentParser()
17
+ parser.add_argument("--log_dir", type=str)
18
+ args = parser.parse_args()
19
+ return args
20
+
21
+
22
+ if __name__ == "__main__":
23
+ args = parse_args()
24
+
25
+ files = os.listdir(args.log_dir)
26
+ files = [
27
+ "img_0.log",
28
+ "img_1.log",
29
+ "img_2.log",
30
+ "144p_vid.log",
31
+ "240p_vid.log",
32
+ "360p_vid.log",
33
+ "480p_vid.log",
34
+ "720p_vid.log",
35
+ ]
36
+
37
+ loss_info = {}
38
+
39
+ for fname in files:
40
+ path = os.path.join(args.log_dir, fname)
41
+ with open(path, "r", encoding="utf-8") as f:
42
+ content = f.readlines()
43
+ eval_line = content[-1].split("losses:")[-1].strip()
44
+ loss_dict = literal_eval(eval_line)
45
+ for key, loss in loss_dict.items():
46
+ resolution, frame = key
47
+ if resolution not in loss_info:
48
+ loss_info[resolution] = {}
49
+ loss_info[resolution][frame] = format(loss, ".4f")
50
+
51
+ # Convert and write JSON object to file
52
+ output_file_path = os.path.join(args.log_dir, "loss.json")
53
+ with open(output_file_path, "w") as outfile:
54
+ json.dump(loss_info, outfile, indent=4, sort_keys=True)
55
+ print(f"results saved to: {output_file_path}")
exp_code/1_benchmark/Open-Sora_v12/eval/sample.sh ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ CKPT=$1
4
+ NUM_FRAMES=$2
5
+ MODEL_NAME=$3
6
+ TASK_TYPE=$4
7
+ VBENCH_START_INDEX=$5
8
+ VBENCH_END_INDEX=$6
9
+ VBENCH_RES=$7
10
+ VBENCH_ASP_RATIO=$8
11
+
12
+ NUM_SAMPLING_STEPS=$9
13
+ FLOW=${10}
14
+ LLM_REFINE=${11}
15
+
16
+ BASE_ASPECT_RATIO=360p
17
+ ASPECT_RATIOS=(144p 240p 360p 480p 720p 1080p)
18
+ # Loop through the list of aspect ratios
19
+ i=0
20
+ for r in "${ASPECT_RATIOS[@]}"; do
21
+ if [[ "$r" == "$BASE_ASPECT_RATIO" ]]; then
22
+ # get aspect ratio 1 level up
23
+ if [[ $((i+1)) -lt ${#ASPECT_RATIOS[@]} ]]; then
24
+ ASPECT_RATIO_INCR_1=${ASPECT_RATIOS[$((i+1))]}
25
+ else
26
+ # If this is the highest ratio, return the highest ratio
27
+ ASPECT_RATIO_INCR_1=${ASPECT_RATIOS[-1]}
28
+ fi
29
+ # get aspect ratio 2 levels up
30
+ if [[ $((i+2)) -lt ${#ASPECT_RATIOS[@]} ]]; then
31
+ ASPECT_RATIO_INCR_2=${ASPECT_RATIOS[$((i+2))]}
32
+ else
33
+ # If this is the highest ratio, return the highest ratio
34
+ ASPECT_RATIO_INCR_2=${ASPECT_RATIOS[-1]}
35
+ fi
36
+ fi
37
+ i=$((i+1))
38
+ done
39
+ echo "base aspect ratio: ${BASE_ASPECT_RATIO}"
40
+ echo "aspect ratio 1 level up: ${ASPECT_RATIO_INCR_1}"
41
+ echo "aspect ratio 2 levels up: ${ASPECT_RATIO_INCR_2}"
42
+ echo "Note that this aspect ratio level setting is used for videos only, not images"
43
+
44
+ echo "NUM_FRAMES=${NUM_FRAMES}"
45
+
46
+ if [ -z "${NUM_FRAMES}" ]; then
47
+ echo "you need to pass NUM_FRAMES"
48
+ else
49
+ let DOUBLE_FRAMES=$2*2
50
+ let QUAD_FRAMES=$2*4
51
+ let OCT_FRAMES=$2*8
52
+ fi
53
+
54
+ echo "DOUBLE_FRAMES=${DOUBLE_FRAMES}"
55
+ echo "QUAD_FRAMES=${QUAD_FRAMES}"
56
+ echo "OCT_FRAMES=${OCT_FRAMES}"
57
+
58
+ CMD="python scripts/inference.py configs/opensora-v1-2/inference/sample.py"
59
+ if [[ $CKPT == *"ema"* ]]; then
60
+ parentdir=$(dirname $CKPT)
61
+ CKPT_BASE=$(basename $parentdir)_ema
62
+ else
63
+ CKPT_BASE=$(basename $CKPT)
64
+ fi
65
+ OUTPUT="/mnt/jfs-hdd/sora/samples/samples_${MODEL_NAME}_${CKPT_BASE}"
66
+ start=$(date +%s)
67
+ DEFAULT_BS=1
68
+
69
+ ### Functions
70
+
71
+ # called inside run_video_b
72
+ function run_image() { # 14min
73
+ # 1.1 1024x1024
74
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2i_samples.txt --save-dir $OUTPUT --num-frames 1 --resolution 1024 --aspect-ratio 1:1 --sample-name image_1024_1_1 --batch-size $DEFAULT_BS
75
+
76
+ # 1.2 240x426
77
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2i_samples.txt --save-dir $OUTPUT --num-frames 1 --resolution 240p --aspect-ratio 9:16 --sample-name image_240p_9_16 --end-index 3 --batch-size $DEFAULT_BS
78
+
79
+ # 1.3 512x512
80
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2i_samples.txt --save-dir $OUTPUT --num-frames 1 --resolution 512 --aspect-ratio 1:1 --sample-name image_t2i_512_1_1 --end-index 3 --batch-size $DEFAULT_BS
81
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 1 --resolution 512 --aspect-ratio 1:1 --sample-name image_t2v_512_1_1 --end-index 3 --batch-size $DEFAULT_BS
82
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 1 --resolution 512 --aspect-ratio 1:1 --sample-name image_short_512_1_1 --end-index 3 --batch-size $DEFAULT_BS
83
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 1 --resolution 512 --aspect-ratio 1:1 --sample-name image_sora_512_1_1 --end-index 3 --batch-size $DEFAULT_BS
84
+
85
+ # 1.4 720p multi-resolution
86
+ # 1:1
87
+ PROMPT="Bright scene, aerial view,ancient city, fantasy, gorgeous light, mirror reflection, high detail, wide angle lens."
88
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 1:1 --sample-name image_720p_1_1
89
+ # 9:16
90
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 9:16 --sample-name image_720p_9_16
91
+ # 16:9
92
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 16:9 --sample-name image_720p_16_9
93
+ # 4:3
94
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 4:3 --sample-name image_720p_4_3
95
+ # 3:4
96
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 3:4 --sample-name image_720p_3_4
97
+ # 1:2
98
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 1:2 --sample-name image_720p_1_2
99
+ # 2:1
100
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 1 --resolution 720p --aspect-ratio 2:1 --sample-name image_720p_2_1
101
+ }
102
+
103
+ # for (sample, short, sora)
104
+ # for ( (4s, 720p), (8s, 480p), (16s, 360p) )
105
+
106
+ function run_video_a() { # ~ 30min ?
107
+ ### previous cmds # 42min, sample & multi-resolution
108
+ # # sample, 144p, 9:16, 2s
109
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 2s --resolution 144p --aspect-ratio 9:16 --sample-name sample_2s_144p_9_16 --batch-size $DEFAULT_BS
110
+ # # sample, 240p, 9:16, 2s
111
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 2s --resolution 240p --aspect-ratio 9:16 --sample-name sample_2s_240p_9_16 --batch-size $DEFAULT_BS
112
+ # # sample, 240p, 9:16, 4s
113
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 4s --resolution 240p --aspect-ratio 9:16 --sample-name sample_4s_240p_9_16 --batch-size $DEFAULT_BS
114
+ # # sample, 240p, 9:16, 8s
115
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 8s --resolution 240p --aspect-ratio 9:16 --sample-name sample_8s_240p_9_16 --batch-size $DEFAULT_BS
116
+ # # sample, 480p, 9:16, 2s
117
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 2s --resolution 480p --aspect-ratio 9:16 --sample-name sample_2s_480p_9_16 --batch-size $DEFAULT_BS
118
+ # # sample, 480p, 9:16, 4s
119
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 4s --resolution 480p --aspect-ratio 9:16 --sample-name sample_4s_480p_9_16 --batch-size $DEFAULT_BS
120
+ # # sample, 720p, 9:16, 2s
121
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 2s --resolution 720p --aspect-ratio 9:16 --sample-name sample_2s_720p_9_16 --batch-size $DEFAULT_BS
122
+
123
+ # sample, 720p (ASPECT_RATIO_INCR_2), 9:16, 4s
124
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 4s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 9:16 --sample-name sample_4s_${ASPECT_RATIO_INCR_2} --batch-size $DEFAULT_BS
125
+
126
+ # sample, 480p, 9:16, 8s
127
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 8s --resolution ${ASPECT_RATIO_INCR_1} --aspect-ratio 9:16 --sample-name sample_8s_${ASPECT_RATIO_INCR_1} --batch-size $DEFAULT_BS
128
+
129
+ # sample, 360p, 9:16, 16s
130
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_samples.txt --save-dir $OUTPUT --num-frames 16s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 --sample-name sample_16s_${BASE_ASPECT_RATIO} --batch-size $DEFAULT_BS
131
+ }
132
+
133
+ function run_video_b() { # 18min + 14min = 32min, short 16x240p & 64x240p
134
+ # run image, 14min
135
+ echo "Inside run_video_b, running image samples..."
136
+ run_image
137
+
138
+ echo "Inside run_video_b, running video samples..."
139
+
140
+ ### previous cmds, 18min
141
+ # # short, 240p, 9:16, 4s
142
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 4s --resolution 240p --aspect-ratio 9:16 --sample-name short_4s_240p_9_16 --batch-size $DEFAULT_BS
143
+ # # short, 240p, 9:16, 8s
144
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 8s --resolution 240p --aspect-ratio 9:16 --sample-name short_8s_240p_9_16 --batch-size $DEFAULT_BS
145
+
146
+ # short, 480p, 9:16, 8s: ~24min
147
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 8s --resolution ${ASPECT_RATIO_INCR_1} --aspect-ratio 9:16 --sample-name short_8s_${ASPECT_RATIO_INCR_1} --batch-size $DEFAULT_BS
148
+
149
+ # short, 360p, 9:16, 16s: ~24min
150
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 16s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 --sample-name short_16s_${BASE_ASPECT_RATIO} --batch-size $DEFAULT_BS
151
+
152
+ }
153
+
154
+ function run_video_c() {
155
+ ### previous cmds, 60min
156
+ # # sora, 240p, 16:9, 2s
157
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 2s --resolution 240p --aspect-ratio 16:9 --sample-name sora_2s_240p_16_9 --batch-size $DEFAULT_BS
158
+ # # sora, 240p, 9:16, 2s
159
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 2s --resolution 240p --aspect-ratio 9:16 --sample-name sora_2s_240p_9_16 --batch-size $DEFAULT_BS
160
+ # # sora, 240p, 9:16, 16s
161
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 16s --resolution 240p --aspect-ratio 9:16 --sample-name sora_16s_240p_9_16 --batch-size $DEFAULT_BS
162
+
163
+ # short, 720p (ASPECT_RATIO_INCR_2), 9:16, 4s
164
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 4s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 9:16 --sample-name short_4s_${ASPECT_RATIO_INCR_2} --batch-size $DEFAULT_BS
165
+
166
+ # sora, 360p, 9:16, 16s: ~40min
167
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 16s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 --sample-name sora_16s_${BASE_ASPECT_RATIO} --batch-size $DEFAULT_BS
168
+ }
169
+
170
+ function run_video_d() {
171
+ ### previous cmds, 21min + 30min = 51min
172
+ # # short, 480p, 9:16, 4s: 21min
173
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_short.txt --save-dir $OUTPUT --num-frames 4s --resolution 480p --aspect-ratio 9:16 --sample-name short_4s_480p_9_16 --batch-size $DEFAULT_BS
174
+ # # sora, 480p, 9:16, 8s, 1/3 # moved from run_video_e, 30min
175
+ # eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 8s --resolution 480p --aspect-ratio 9:16 --sample-name sora_8s_480p_9_16 --batch-size $DEFAULT_BS --start-index 0 --end-index 16
176
+
177
+ # sora, 480p, 9:16, 8s, 1/3 # moved from run_video_e, 30min
178
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 8s --resolution ${ASPECT_RATIO_INCR_1} --aspect-ratio 9:16 --sample-name sora_8s_${ASPECT_RATIO_INCR_1} --batch-size $DEFAULT_BS --start-index 0 --end-index 16
179
+ }
180
+
181
+ function run_video_e() { # 90min * 2/3 = 60min
182
+ # sora, 480p, 9:16, 8s, 2/3
183
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 8s --resolution ${ASPECT_RATIO_INCR_1} --aspect-ratio 9:16 --sample-name sora_8s_${ASPECT_RATIO_INCR_1} --batch-size $DEFAULT_BS --start-index 16 --end-index 100
184
+ }
185
+
186
+ function run_video_f() { # 60min
187
+ # sora, 720p (ASPECT_RATIO_INCR_2), 9:16, 4s
188
+ eval $CMD --ckpt-path $CKPT --prompt-path assets/texts/t2v_sora.txt --save-dir $OUTPUT --num-frames 4s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 9:16 --sample-name sora_4s_${ASPECT_RATIO_INCR_2} --batch-size $DEFAULT_BS
189
+ }
190
+
191
+ # --resolution 720p --aspect-ratio [16:9, 9:16, ...]
192
+
193
+ function run_video_g() { # 15min
194
+ # 720p, 2s multi-resolution
195
+ # 1:1
196
+ PROMPT="A soaring drone footage captures the majestic beauty of a coastal cliff, its red and yellow stratified rock faces rich in color and against the vibrant turquoise of the sea. Seabirds can be seen taking flight around the cliff's precipices. As the drone slowly moves from different angles, the changing sunlight casts shifting shadows that highlight the rugged textures of the cliff and the surrounding calm sea. The water gently laps at the rock base and the greenery that clings to the top of the cliff, and the scene gives a sense of peaceful isolation at the fringes of the ocean. The video captures the essence of pristine natural beauty untouched by human structures."
197
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 1:1 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_1_1
198
+ # 16:9
199
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 16:9 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_16_9
200
+ # 9:16
201
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 9:16 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_9_16
202
+ # 4:3
203
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 4:3 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_4_3
204
+ # 3:4
205
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 3:4 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_3_4
206
+ # 1:2
207
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 1:2 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_1_2
208
+ # 2:1
209
+ eval $CMD --ckpt-path $CKPT --prompt \"$PROMPT\" --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --aspect-ratio 2:1 --sample-name drone_cliff_prompt_${ASPECT_RATIO_INCR_2}_2s_2_1
210
+
211
+ # add motion score
212
+ eval $CMD --ckpt-path $CKPT --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --sample-name motion_2s_${ASPECT_RATIO_INCR_2} --prompt \
213
+ \"A stylish woman walking in the street of Tokyo.\" \"A stylish woman walking in the street of Tokyo. motion score: 0.0\" \
214
+ \"A stylish woman walking in the street of Tokyo. motion score: 2.0\" \
215
+ \"A stylish woman walking in the street of Tokyo. motion score: 4.0\" \
216
+ \"A stylish woman walking in the street of Tokyo. motion score: 6.0\" \
217
+ \"A stylish woman walking in the street of Tokyo. motion score: 10.0\" \
218
+ \"A stylish woman walking in the street of Tokyo. motion score: 25.0\" \
219
+ \"A stylish woman walking in the street of Tokyo. motion score: 50.0\" \
220
+ \"A stylish woman walking in the street of Tokyo. motion score: 100.0\"
221
+
222
+ # add aes score
223
+ eval $CMD --ckpt-path $CKPT --save-dir $OUTPUT --num-frames 2s --resolution ${ASPECT_RATIO_INCR_2} --sample-name aes_2s_${ASPECT_RATIO_INCR_2} --prompt \
224
+ \"A stylish woman walking in the street of Tokyo.\" \"A stylish woman walking in the street of Tokyo. aesthetic score: 4.0\" \
225
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 4.5\" \
226
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 5.0\" \
227
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 5.5\" \
228
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 6.0\" \
229
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 6.5\" \
230
+ \"A stylish woman walking in the street of Tokyo. aesthetic score: 7.0\"
231
+ }
232
+
233
+ # resolution -> 480p
234
+
235
+ function run_video_h() { # 61min
236
+ # 3.1 image-conditioned long video generation
237
+ eval $CMD --ckpt-path $CKPT --save-dir $OUTPUT --sample-name ref_L5C5_2s_${BASE_ASPECT_RATIO}_9_16 \
238
+ --prompt-path assets/texts/t2v_ref.txt --start-index 0 --end-index 3 \
239
+ --num-frames 2s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 \
240
+ --loop 5 --condition-frame-length 5 \
241
+ --reference-path assets/images/condition/cliff.png assets/images/condition/wave.png assets/images/condition/ship.png \
242
+ --mask-strategy "0" "0" "0" --batch-size $DEFAULT_BS
243
+
244
+ eval $CMD --ckpt-path $CKPT --save-dir $OUTPUT --sample-name ref_L5C10_16s_${BASE_ASPECT_RATIO}_9_16 \
245
+ --prompt-path assets/texts/t2v_ref.txt --start-index 0 --end-index 3 \
246
+ --num-frames 16s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 \
247
+ --loop 5 --condition-frame-length 10 \
248
+ --reference-path assets/images/condition/cliff.png assets/images/condition/wave.png assets/images/condition/ship.png \
249
+ --mask-strategy "0" "0" "0" --batch-size $DEFAULT_BS
250
+
251
+ # 3.2
252
+ eval $CMD --ckpt-path $CKPT --save-dir $OUTPUT --sample-name ref_L1_16s_${BASE_ASPECT_RATIO}_9_16 \
253
+ --prompt-path assets/texts/t2v_ref.txt --start-index 3 --end-index 6 \
254
+ --num-frames 16s --resolution ${BASE_ASPECT_RATIO} --aspect-ratio 9:16 \
255
+ --loop 1 \
256
+ --reference-path assets/images/condition/cliff.png "assets/images/condition/cactus-sad.png\;assets/images/condition/cactus-happy.png" https://cdn.openai.com/tmp/s/interp/d0.mp4 \
257
+ --mask-strategy "0" "0\;0,1,0,-1,1" "0,0,0,0,${QUAD_FRAMES},0.5" --batch-size $DEFAULT_BS
258
+ }
259
+
260
+ # vbench has 950 samples
261
+
262
+ VBENCH_BS=1 # 80GB
263
+ VBENCH_H=240
264
+ VBENCH_W=426
265
+
266
+ function run_vbench() {
267
+ if [ -z ${VBENCH_RES} ] || [ -z ${VBENCH_ASP_RATIO} ]; then
268
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
269
+ --prompt-path assets/texts/VBench/all_dimension.txt \
270
+ --image-size $VBENCH_H $VBENCH_W \
271
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
272
+ else
273
+ if [ -z ${NUM_SAMPLING_STEPS} ]; then
274
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
275
+ --prompt-path assets/texts/VBench/all_dimension.txt \
276
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO \
277
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
278
+ else
279
+ if [ -z ${FLOW} ]; then
280
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
281
+ --prompt-path assets/texts/VBench/all_dimension.txt \
282
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} \
283
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
284
+ else
285
+ if [ -z ${LLM_REFINE} ]; then
286
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
287
+ --prompt-path assets/texts/VBench/all_dimension.txt \
288
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --flow ${FLOW} \
289
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
290
+ else
291
+ if [ "${FLOW}" = "None" ]; then
292
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
293
+ --prompt-path assets/texts/VBench/all_dimension.txt \
294
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --llm-refine ${LLM_REFINE} \
295
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
296
+ else
297
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench --prompt-as-path --num-sample 5 \
298
+ --prompt-path assets/texts/VBench/all_dimension.txt \
299
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --flow ${FLOW} --llm-refine ${LLM_REFINE} \
300
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
301
+ fi
302
+ fi
303
+ fi
304
+ fi
305
+ fi
306
+ }
307
+
308
+ # vbench-i2v has 1120 samples
309
+
310
+ VBENCH_I2V_H=256
311
+ VBENCH_I2V_W=256
312
+
313
+ function run_vbench_i2v() {
314
+ if [ -z ${VBENCH_RES} ] || [ -z ${VBENCH_ASP_RATIO} ]; then
315
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
316
+ --prompt-path assets/texts/VBench/all_i2v.txt \
317
+ --image-size $VBENCH_I2V_H $VBENCH_I2V_W \
318
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
319
+ else
320
+ if [ -z ${NUM_SAMPLING_STEPS} ]; then
321
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
322
+ --prompt-path assets/texts/VBench/all_i2v.txt \
323
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO \
324
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
325
+ else
326
+ if [ -z ${FLOW} ]; then
327
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
328
+ --prompt-path assets/texts/VBench/all_i2v.txt \
329
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} \
330
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
331
+ else
332
+ if [ -z ${LLM_REFINE} ]; then
333
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
334
+ --prompt-path assets/texts/VBench/all_i2v.txt \
335
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --flow ${FLOW} \
336
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
337
+ else
338
+ if [ "${FLOW}" = "None" ]; then
339
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
340
+ --prompt-path assets/texts/VBench/all_i2v.txt \
341
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --llm-refine ${LLM_REFINE} \
342
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
343
+ else
344
+ eval $CMD --ckpt-path $CKPT --save-dir ${OUTPUT}_vbench_i2v --prompt-as-path --num-sample 5 \
345
+ --prompt-path assets/texts/VBench/all_i2v.txt \
346
+ --resolution $VBENCH_RES --aspect-ratio $VBENCH_ASP_RATIO --num-sampling-steps ${NUM_SAMPLING_STEPS} --flow ${FLOW} --llm-refine ${LLM_REFINE} \
347
+ --batch-size $VBENCH_BS --num-frames $NUM_FRAMES --start-index $1 --end-index $2
348
+ fi
349
+ fi
350
+ fi
351
+ fi
352
+ fi
353
+ }
354
+
355
+ ### Main
356
+
357
+ for arg in "$@"; do
358
+ # image
359
+ if [[ "$arg" = -1 ]] || [[ "$arg" = --image ]]; then
360
+ echo "Running image samples..."
361
+ run_image
362
+ fi
363
+ if [[ "$arg" = -2a ]] || [[ "$arg" = --video ]]; then
364
+ echo "Running video samples a..."
365
+ run_video_a
366
+ fi
367
+ if [[ "$arg" = -2b ]] || [[ "$arg" = --video ]]; then
368
+ echo "Running video samples b..."
369
+ run_video_b
370
+ fi
371
+ if [[ "$arg" = -2c ]] || [[ "$arg" = --video ]]; then
372
+ echo "Running video samples c..."
373
+ run_video_c
374
+ fi
375
+ if [[ "$arg" = -2d ]] || [[ "$arg" = --video ]]; then
376
+ echo "Running video samples d..."
377
+ run_video_d
378
+ fi
379
+ if [[ "$arg" = -2e ]] || [[ "$arg" = --video ]]; then
380
+ echo "Running video samples e..."
381
+ run_video_e
382
+ fi
383
+ if [[ "$arg" = -2f ]] || [[ "$arg" = --video ]]; then
384
+ echo "Running video samples f..."
385
+ run_video_f
386
+ fi
387
+ if [[ "$arg" = -2g ]] || [[ "$arg" = --video ]]; then
388
+ echo "Running video samples g..."
389
+ run_video_g
390
+ fi
391
+ if [[ "$arg" = -2h ]] || [[ "$arg" = --video ]]; then
392
+ echo "Running video samples h..."
393
+ run_video_h
394
+ fi
395
+ # vbench
396
+ if [[ "$arg" = -4 ]] || [[ "$arg" = --vbench ]]; then
397
+ echo "Running vbench samples ..."
398
+ if [ -z ${VBENCH_START_INDEX} ] || [ -z ${VBENCH_END_INDEX} ]; then
399
+ echo "need to set start_index and end_index"
400
+ else
401
+ run_vbench $VBENCH_START_INDEX $VBENCH_END_INDEX
402
+ fi
403
+ fi
404
+ # vbench-i2v
405
+ if [[ "$arg" = -5 ]] || [[ "$arg" = --vbench-i2v ]]; then
406
+ echo "Running vbench-i2v samples ..."
407
+ if [ -z ${VBENCH_START_INDEX} ] || [ -z ${VBENCH_END_INDEX} ]; then
408
+ echo "need to set start_index and end_index"
409
+ else
410
+ run_vbench_i2v $VBENCH_START_INDEX $VBENCH_END_INDEX
411
+ fi
412
+ fi
413
+ done
414
+
415
+ ### End
416
+
417
+ end=$(date +%s)
418
+
419
+ runtime=$((end - start))
420
+
421
+ echo "Runtime: $runtime seconds"
exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_flolpips.py ADDED
@@ -0,0 +1,89 @@
1
+ import sys
2
+
3
+ import numpy as np
4
+ import torch
5
+ from tqdm import tqdm
6
+
7
+ sys.path.append(".")
8
+
9
+ from flolpips.flolpips import FloLPIPS
10
+ from flolpips.pwcnet import Network as PWCNet
11
+
12
+ loss_fn = FloLPIPS(net="alex", version="0.1").eval().requires_grad_(False)
13
+ flownet = PWCNet().eval().requires_grad_(False)
14
+
15
+
16
+ def trans(x):
17
+ return x
18
+
19
+
20
+ def calculate_flolpips(videos1, videos2, device):
21
+ global loss_fn, flownet
22
+
23
+ print("calculate_flowlpips...")
24
+ loss_fn = loss_fn.to(device)
25
+ flownet = flownet.to(device)
26
+
27
+ if videos1.shape != videos2.shape:
28
+ print("Warning: the shape of videos are not equal.")
29
+ min_frames = min(videos1.shape[1], videos2.shape[1])
30
+ videos1 = videos1[:, :min_frames]
31
+ videos2 = videos2[:, :min_frames]
32
+
33
+ videos1 = trans(videos1)
34
+ videos2 = trans(videos2)
35
+
36
+ flolpips_results = []
37
+ for video_num in tqdm(range(videos1.shape[0])):
38
+ video1 = videos1[video_num].to(device)
39
+ video2 = videos2[video_num].to(device)
40
+ frames_rec = video1[:-1]
41
+ frames_rec_next = video1[1:]
42
+ frames_gt = video2[:-1]
43
+ frames_gt_next = video2[1:]
44
+ t, c, h, w = frames_gt.shape
45
+ flow_gt = flownet(frames_gt, frames_gt_next)
46
+ flow_dis = flownet(frames_rec, frames_rec_next)
47
+ flow_diff = flow_gt - flow_dis
48
+ flolpips = loss_fn.forward(frames_gt, frames_rec, flow_diff, normalize=True)
49
+ flolpips_results.append(flolpips.cpu().numpy().tolist())
50
+
51
+ flolpips_results = np.array(flolpips_results) # [batch_size, num_frames]
52
+ flolpips = {}
53
+ flolpips_std = {}
54
+
55
+ for clip_timestamp in range(flolpips_results.shape[1]):
56
+ flolpips[clip_timestamp] = np.mean(flolpips_results[:, clip_timestamp], axis=-1)
57
+ flolpips_std[clip_timestamp] = np.std(flolpips_results[:, clip_timestamp], axis=-1)
58
+
59
+ result = {
60
+ "value": flolpips,
61
+ "value_std": flolpips_std,
62
+ "video_setting": video1.shape,
63
+ "video_setting_name": "time, channel, heigth, width",
64
+ "result": flolpips_results,
65
+ "details": flolpips_results.tolist(),
66
+ }
67
+
68
+ return result
69
+
70
+
71
+ # test code / using example
72
+
73
+
74
+ def main():
75
+ NUMBER_OF_VIDEOS = 8
76
+ VIDEO_LENGTH = 50
77
+ CHANNEL = 3
78
+ SIZE = 64
79
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
80
+ videos2 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
81
+
82
+ import json
83
+
84
+ result = calculate_flolpips(videos1, videos2, "cuda:0")
85
+ print(json.dumps(result, indent=4))
86
+
87
+
88
+ if __name__ == "__main__":
89
+ main()
exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_lpips.py ADDED
@@ -0,0 +1,99 @@
1
+ import lpips
2
+ import numpy as np
3
+ import torch
4
+ from tqdm import tqdm
5
+
6
+ spatial = True # Return a spatial map of perceptual distance.
7
+
8
+ # Linearly calibrated models (LPIPS)
9
+ loss_fn = lpips.LPIPS(net="alex", spatial=spatial) # Can also set net = 'squeeze' or 'vgg'
10
+ # loss_fn = lpips.LPIPS(net='alex', spatial=spatial, lpips=False) # Can also set net = 'squeeze' or 'vgg'
11
+
12
+
13
+ def trans(x):
14
+ # if greyscale images add channel
15
+ if x.shape[-3] == 1:
16
+ x = x.repeat(1, 1, 3, 1, 1)
17
+
18
+ # value range [0, 1] -> [-1, 1]
19
+ x = x * 2 - 1
20
+
21
+ return x
22
+
23
+
24
+ def calculate_lpips(videos1, videos2, device):
25
+ # image should be RGB, IMPORTANT: normalized to [-1,1]
26
+ print("calculate_lpips...")
27
+
28
+ assert videos1.shape == videos2.shape
29
+
30
+ # videos [batch_size, timestamps, channel, h, w]
31
+
32
+ # support grayscale input, if grayscale -> channel*3
33
+ # value range [0, 1] -> [-1, 1]
34
+ videos1 = trans(videos1)
35
+ videos2 = trans(videos2)
36
+
37
+ lpips_results = []
38
+
39
+ for video_num in tqdm(range(videos1.shape[0])):
40
+ # get a video
41
+ # video [timestamps, channel, h, w]
42
+ video1 = videos1[video_num]
43
+ video2 = videos2[video_num]
44
+
45
+ lpips_results_of_a_video = []
46
+ for clip_timestamp in range(len(video1)):
47
+ # get a img
48
+ # img [timestamps[x], channel, h, w]
49
+ # img [channel, h, w] tensor
50
+
51
+ img1 = video1[clip_timestamp].unsqueeze(0).to(device)
52
+ img2 = video2[clip_timestamp].unsqueeze(0).to(device)
53
+
54
+ loss_fn.to(device)
55
+
56
+ # calculate lpips of a video
57
+ lpips_results_of_a_video.append(loss_fn.forward(img1, img2).mean().detach().cpu().tolist())
58
+ lpips_results.append(lpips_results_of_a_video)
59
+
60
+ lpips_results = np.array(lpips_results)
61
+
62
+ lpips = {}
63
+ lpips_std = {}
64
+
65
+ for clip_timestamp in range(len(video1)):
66
+ lpips[clip_timestamp] = np.mean(lpips_results[:, clip_timestamp])
67
+ lpips_std[clip_timestamp] = np.std(lpips_results[:, clip_timestamp])
68
+
69
+ result = {
70
+ "value": lpips,
71
+ "value_std": lpips_std,
72
+ "video_setting": video1.shape,
73
+ "video_setting_name": "time, channel, heigth, width",
74
+ }
75
+
76
+ return result
77
+
78
+
79
+ # test code / using example
80
+
81
+
82
+ def main():
83
+ NUMBER_OF_VIDEOS = 8
84
+ VIDEO_LENGTH = 50
85
+ CHANNEL = 3
86
+ SIZE = 64
87
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
88
+ videos2 = torch.ones(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
89
+ device = torch.device("cuda")
90
+ # device = torch.device("cpu")
91
+
92
+ import json
93
+
94
+ result = calculate_lpips(videos1, videos2, device)
95
+ print(json.dumps(result, indent=4))
96
+
97
+
98
+ if __name__ == "__main__":
99
+ main()
exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_psnr.py ADDED
@@ -0,0 +1,92 @@
1
+ import math
2
+
3
+ import numpy as np
4
+ import torch
5
+ from tqdm import tqdm
6
+
7
+
8
+ def img_psnr(img1, img2):
9
+ # [0,1]
10
+ # compute mse
11
+ # mse = np.mean((img1-img2)**2)
12
+ mse = np.mean((img1 / 1.0 - img2 / 1.0) ** 2)
13
+ # compute psnr
14
+ if mse < 1e-10:
15
+ return 100
16
+ psnr = 20 * math.log10(1 / math.sqrt(mse))
17
+ return psnr
18
+
19
+
20
+ def trans(x):
21
+ return x
22
+
23
+
24
+ def calculate_psnr(videos1, videos2):
25
+ print("calculate_psnr...")
26
+
27
+ # videos [batch_size, timestamps, channel, h, w]
28
+
29
+ assert videos1.shape == videos2.shape
30
+
31
+ videos1 = trans(videos1)
32
+ videos2 = trans(videos2)
33
+
34
+ psnr_results = []
35
+
36
+ for video_num in tqdm(range(videos1.shape[0])):
37
+ # get a video
38
+ # video [timestamps, channel, h, w]
39
+ video1 = videos1[video_num]
40
+ video2 = videos2[video_num]
41
+
42
+ psnr_results_of_a_video = []
43
+ for clip_timestamp in range(len(video1)):
44
+ # get a img
45
+ # img [timestamps[x], channel, h, w]
46
+ # img [channel, h, w] numpy
47
+
48
+ img1 = video1[clip_timestamp].numpy()
49
+ img2 = video2[clip_timestamp].numpy()
50
+
51
+ # calculate psnr of a video
52
+ psnr_results_of_a_video.append(img_psnr(img1, img2))
53
+
54
+ psnr_results.append(psnr_results_of_a_video)
55
+
56
+ psnr_results = np.array(psnr_results) # [batch_size, num_frames]
57
+ psnr = {}
58
+ psnr_std = {}
59
+
60
+ for clip_timestamp in range(len(video1)):
61
+ psnr[clip_timestamp] = np.mean(psnr_results[:, clip_timestamp])
62
+ psnr_std[clip_timestamp] = np.std(psnr_results[:, clip_timestamp])
63
+
64
+ result = {
65
+ "value": psnr,
66
+ "value_std": psnr_std,
67
+ "video_setting": video1.shape,
68
+ "video_setting_name": "time, channel, heigth, width",
69
+ }
70
+
71
+ return result
72
+
73
+
74
+ # test code / using example
75
+
76
+
77
+ def main():
78
+ NUMBER_OF_VIDEOS = 8
79
+ VIDEO_LENGTH = 50
80
+ CHANNEL = 3
81
+ SIZE = 64
82
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
83
+ videos2 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
84
+
85
+ import json
86
+
87
+ result = calculate_psnr(videos1, videos2)
88
+ print(json.dumps(result, indent=4))
89
+
90
+
91
+ if __name__ == "__main__":
92
+ main()
exp_code/1_benchmark/Open-Sora_v12/eval/vae/cal_ssim.py ADDED
@@ -0,0 +1,119 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ from tqdm import tqdm
5
+
6
+
7
+ def ssim(img1, img2):
8
+ C1 = 0.01**2
9
+ C2 = 0.03**2
10
+ img1 = img1.astype(np.float64)
11
+ img2 = img2.astype(np.float64)
12
+ kernel = cv2.getGaussianKernel(11, 1.5)
13
+ window = np.outer(kernel, kernel.transpose())
14
+ mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
15
+ mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
16
+ mu1_sq = mu1**2
17
+ mu2_sq = mu2**2
18
+ mu1_mu2 = mu1 * mu2
19
+ sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
20
+ sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
21
+ sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
22
+ ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
23
+ return ssim_map.mean()
24
+
25
+
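For reference, the windowed SSIM above follows the standard formulation with C_1 = 0.01^2, C_2 = 0.03^2 and an 11x11 Gaussian window (sigma = 1.5); the reported score is the mean of this map over the valid (cropped) region:

\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)\,(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)\,(\sigma_x^2 + \sigma_y^2 + C_2)}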
26
+ def calculate_ssim_function(img1, img2):
27
+ # [0,1]
28
+ # ssim is the only metric extremely sensitive to gray being compared to b/w
29
+ if not img1.shape == img2.shape:
30
+ raise ValueError("Input images must have the same dimensions.")
31
+ if img1.ndim == 2:
32
+ return ssim(img1, img2)
33
+ elif img1.ndim == 3:
34
+ if img1.shape[0] == 3:
35
+ ssims = []
36
+ for i in range(3):
37
+ ssims.append(ssim(img1[i], img2[i]))
38
+ return np.array(ssims).mean()
39
+ elif img1.shape[0] == 1:
40
+ return ssim(np.squeeze(img1), np.squeeze(img2))
41
+ else:
42
+ raise ValueError("Wrong input image dimensions.")
43
+
44
+
45
+ def trans(x):
46
+ return x
47
+
48
+
49
+ def calculate_ssim(videos1, videos2):
50
+ print("calculate_ssim...")
51
+
52
+ # videos [batch_size, timestamps, channel, h, w]
53
+
54
+ assert videos1.shape == videos2.shape
55
+
56
+ videos1 = trans(videos1)
57
+ videos2 = trans(videos2)
58
+
59
+ ssim_results = []
60
+
61
+ for video_num in tqdm(range(videos1.shape[0])):
62
+ # get a video
63
+ # video [timestamps, channel, h, w]
64
+ video1 = videos1[video_num]
65
+ video2 = videos2[video_num]
66
+
67
+ ssim_results_of_a_video = []
68
+ for clip_timestamp in range(len(video1)):
69
+ # get a img
70
+ # img [timestamps[x], channel, h, w]
71
+ # img [channel, h, w] numpy
72
+
73
+ img1 = video1[clip_timestamp].numpy()
74
+ img2 = video2[clip_timestamp].numpy()
75
+
76
+ # calculate ssim of a video
77
+ ssim_results_of_a_video.append(calculate_ssim_function(img1, img2))
78
+
79
+ ssim_results.append(ssim_results_of_a_video)
80
+
81
+ ssim_results = np.array(ssim_results)
82
+
83
+ ssim = {}
84
+ ssim_std = {}
85
+
86
+ for clip_timestamp in range(len(video1)):
87
+ ssim[clip_timestamp] = np.mean(ssim_results[:, clip_timestamp])
88
+ ssim_std[clip_timestamp] = np.std(ssim_results[:, clip_timestamp])
89
+
90
+ result = {
91
+ "value": ssim,
92
+ "value_std": ssim_std,
93
+ "video_setting": video1.shape,
94
+ "video_setting_name": "time, channel, heigth, width",
95
+ }
96
+
97
+ return result
98
+
99
+
100
+ # test code / using example
101
+
102
+
103
+ def main():
104
+ NUMBER_OF_VIDEOS = 8
105
+ VIDEO_LENGTH = 50
106
+ CHANNEL = 3
107
+ SIZE = 64
108
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
109
+ videos2 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
110
+ torch.device("cuda")
111
+
112
+ import json
113
+
114
+ result = calculate_ssim(videos1, videos2)
115
+ print(json.dumps(result, indent=4))
116
+
117
+
118
+ if __name__ == "__main__":
119
+ main()
exp_code/1_benchmark/Open-Sora_v12/eval/vae/eval_common_metric.py ADDED
@@ -0,0 +1,232 @@
1
+ """Calculates the CLIP Scores
2
+
3
+ The CLIP model is a contrasitively learned language-image model. There is
4
+ an image encoder and a text encoder. It is believed that the CLIP model could
5
+ measure the similarity of cross modalities. Please find more information from
6
+ https://github.com/openai/CLIP.
7
+
8
+ The CLIP Score measures the Cosine Similarity between two embedded features.
9
+ This repository utilizes the pretrained CLIP Model to calculate
10
+ the mean average of cosine similarities.
11
+
12
+ See --help to see further details.
13
+
14
+ Code apapted from https://github.com/mseitzer/pytorch-fid and https://github.com/openai/CLIP.
15
+
16
+ Copyright 2023 The Hong Kong Polytechnic University
17
+
18
+ Licensed under the Apache License, Version 2.0 (the "License");
19
+ you may not use this file except in compliance with the License.
20
+ You may obtain a copy of the License at
21
+
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+
24
+ Unless required by applicable law or agreed to in writing, software
25
+ distributed under the License is distributed on an "AS IS" BASIS,
26
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
27
+ See the License for the specific language governing permissions and
28
+ limitations under the License.
29
+ """
30
+
31
+ import os
32
+ import os.path as osp
33
+ import sys
34
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
35
+
36
+ import numpy as np
37
+ import torch
38
+ from decord import VideoReader, cpu
39
+ from pytorchvideo.transforms import ShortSideScale
40
+ from torch.utils.data import DataLoader, Dataset, Subset
41
+ from torchvision.transforms import Compose, Lambda
42
+ from torchvision.transforms._transforms_video import CenterCropVideo
43
+
44
+ sys.path.append(".")
45
+ from cal_flolpips import calculate_flolpips
46
+ from cal_lpips import calculate_lpips
47
+ from cal_psnr import calculate_psnr
48
+ from cal_ssim import calculate_ssim
49
+
50
+ try:
51
+ from tqdm import tqdm
52
+ except ImportError:
53
+ # If tqdm is not available, provide a mock version of it
54
+ def tqdm(x):
55
+ return x
56
+
57
+
58
+ class VideoDataset(Dataset):
59
+ def __init__(
60
+ self,
61
+ real_video_dir,
62
+ generated_video_dir,
63
+ num_frames,
64
+ sample_rate=1,
65
+ crop_size=None,
66
+ resolution=128,
67
+ ) -> None:
68
+ super().__init__()
69
+ self.real_video_files = self._combine_without_prefix(real_video_dir)
70
+ self.generated_video_files = self._combine_without_prefix(generated_video_dir)
71
+ self.num_frames = num_frames
72
+ self.sample_rate = sample_rate
73
+ self.crop_size = crop_size
74
+ self.short_size = resolution
75
+
76
+ def __len__(self):
77
+ return len(self.real_video_files)
78
+
79
+ def __getitem__(self, index):
80
+ if index >= len(self):
81
+ raise IndexError
82
+ real_video_file = self.real_video_files[index]
83
+ generated_video_file = self.generated_video_files[index]
84
+ print(real_video_file, generated_video_file)
85
+ real_video_tensor = self._load_video(real_video_file)
86
+ generated_video_tensor = self._load_video(generated_video_file)
87
+ return {"real": real_video_tensor, "generated": generated_video_tensor}
88
+
89
+ def _load_video(self, video_path):
90
+ num_frames = self.num_frames
91
+ sample_rate = self.sample_rate
92
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
93
+ total_frames = len(decord_vr)
94
+ sample_frames_len = sample_rate * num_frames
95
+
96
+ if total_frames >= sample_frames_len:
97
+ s = 0
98
+ e = s + sample_frames_len
99
+ num_frames = num_frames
100
+ else:
101
+ s = 0
102
+ e = total_frames
103
+ num_frames = int(total_frames / sample_frames_len * num_frames)
104
+ print(
105
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
106
+ video_path,
107
+ total_frames,
108
+ )
109
+
110
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
111
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
112
+ video_data = torch.from_numpy(video_data)
113
+ video_data = video_data.permute(0, 3, 1, 2)  # (T, H, W, C) -> (T, C, H, W)
114
+ return _preprocess(video_data, short_size=self.short_size, crop_size=self.crop_size)
115
+
116
+ def _combine_without_prefix(self, folder_path, prefix="."):
117
+ folder = []
118
+ os.makedirs(folder_path, exist_ok=True)
119
+ for name in os.listdir(folder_path):
120
+ if name[0] == prefix:
121
+ continue
122
+ if osp.isfile(osp.join(folder_path, name)):
123
+ folder.append(osp.join(folder_path, name))
124
+ folder.sort()
125
+ return folder
126
+
127
+
128
+ def _preprocess(video_data, short_size=128, crop_size=None):
129
+ transform = Compose(
130
+ [
131
+ Lambda(lambda x: x / 255.0),
132
+ ShortSideScale(size=short_size),
133
+ CenterCropVideo(crop_size=crop_size),
134
+ ]
135
+ )
136
+ video_outputs = transform(video_data)
137
+ # video_outputs = torch.unsqueeze(video_outputs, 0) # (bz,c,t,h,w)
138
+ return video_outputs
139
+
140
+
141
+ def calculate_common_metric(args, dataloader, device):
142
+ metric_dict = {}
143
+ if type(args.metric) is str:
144
+ args.metric = [m.strip() for m in args.metric.split(",")]
145
+ print(args.metric)
146
+ for metric in args.metric:
147
+ score_list = []
148
+ for batch_data in tqdm(dataloader): # {'real': real_video_tensor, 'generated':generated_video_tensor }
149
+ real_videos = batch_data["real"]
150
+ generated_videos = batch_data["generated"]
151
+ assert real_videos.shape[2] == generated_videos.shape[2]
152
+ if metric == "ssim":
153
+ tmp_list = list(calculate_ssim(real_videos, generated_videos)["value"].values())
154
+ elif metric == "psnr":
155
+ tmp_list = list(calculate_psnr(real_videos, generated_videos)["value"].values())
156
+ elif metric == "flolpips":
157
+ result = calculate_flolpips(real_videos, generated_videos, device)
158
+ tmp_list = list(result["value"].values())
159
+ elif metric == "lpips":
160
+ tmp_list = list(calculate_lpips(real_videos, generated_videos, device)["value"].values())
161
+ else:
162
+ print(f"metric {metric} is not in acceped list, not calculated")
163
+ continue
164
+ score_list += tmp_list
165
+ metric_dict[metric] = np.mean(score_list)
166
+
167
+ return metric_dict
168
+
169
+
170
+ def main():
171
+ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
172
+ parser.add_argument("--batch_size", type=int, default=2, help="Batch size to use")
173
+ parser.add_argument("--real_video_dir", type=str, help=("the path of real videos`"))
174
+ parser.add_argument("--generated_video_dir", type=str, help=("the path of generated videos`"))
175
+ parser.add_argument("--device", type=str, default=None, help="Device to use. Like cuda, cuda:0 or cpu")
176
+ parser.add_argument(
177
+ "--num_workers",
178
+ type=int,
179
+ default=None,
180
+ help=("Number of processes to use for data loading. " "Defaults to `min(8, num_cpus)`"),
181
+ )
182
+ parser.add_argument("--sample_fps", type=int, default=30)
183
+ parser.add_argument("--resolution", type=int, default=336)
184
+ parser.add_argument("--crop_size", type=int, default=None)
185
+ parser.add_argument("--num_frames", type=int, default=100)
186
+ parser.add_argument("--sample_rate", type=int, default=1)
187
+ parser.add_argument("--subset_size", type=int, default=None)
188
+ # parser.add_argument("--metric", type=str, default="fvd",choices=['fvd','psnr','ssim','lpips', 'flolpips'])
189
+ parser.add_argument("--metric", nargs="+", default=[])
190
+ parser.add_argument("--fvd_method", type=str, default="styleganv", choices=["styleganv", "videogpt"])
191
+
192
+ args = parser.parse_args()
193
+
194
+ if args.device is None:
195
+ device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
196
+ else:
197
+ device = torch.device(args.device)
198
+
199
+ if args.num_workers is None:
200
+ try:
201
+ num_cpus = len(os.sched_getaffinity(0))
202
+ except AttributeError:
203
+ # os.sched_getaffinity is not available under Windows, use
204
+ # os.cpu_count instead (which may not return the *available* number
205
+ # of CPUs).
206
+ num_cpus = os.cpu_count()
207
+
208
+ num_workers = min(num_cpus, 8) if num_cpus is not None else 0
209
+ else:
210
+ num_workers = args.num_workers
211
+
212
+ dataset = VideoDataset(
213
+ args.real_video_dir,
214
+ args.generated_video_dir,
215
+ num_frames=args.num_frames,
216
+ sample_rate=args.sample_rate,
217
+ crop_size=args.crop_size,
218
+ resolution=args.resolution,
219
+ )
220
+
221
+ if args.subset_size:
222
+ indices = range(args.subset_size)
223
+ dataset = Subset(dataset, indices=indices)
224
+
225
+ dataloader = DataLoader(dataset, args.batch_size, num_workers=num_workers, pin_memory=True)
226
+
227
+ metric_score = calculate_common_metric(args, dataloader, device)
228
+ print("metric: ", args.metric, " ", metric_score)
229
+
230
+
231
+ if __name__ == "__main__":
232
+ main()
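A minimal sketch of driving the metric pipeline above programmatically rather than through the CLI, assuming it runs from eval/vae (so VideoDataset and calculate_common_metric are in scope) and that the two video folders exist; directory names and sizes are placeholders:

import torch
from argparse import Namespace
from torch.utils.data import DataLoader

# Placeholder folders containing matching real/reconstructed clips (paired by sorted filename).
dataset = VideoDataset(
    real_video_dir="samples/real",
    generated_video_dir="samples/recon",
    num_frames=17,
    sample_rate=1,
    crop_size=256,
    resolution=256,
)
dataloader = DataLoader(dataset, batch_size=1, num_workers=0, pin_memory=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args = Namespace(metric=["psnr", "ssim", "lpips"], device=str(device))
print(calculate_common_metric(args, dataloader, device))  # e.g. {'psnr': ..., 'ssim': ..., 'lpips': ...}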