LituRout committed on
Commit cc7aa9d
1 Parent(s): 553199c
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. stable-diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml +54 -0
  2. stable-diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml +53 -0
  3. stable-diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml +54 -0
  4. stable-diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml +53 -0
  5. stable-diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml +86 -0
  6. stable-diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml +98 -0
  7. stable-diffusion/configs/latent-diffusion/cin256-v2.yaml +68 -0
  8. stable-diffusion/configs/latent-diffusion/ffhq-ldm-vq-4.yaml +85 -0
  9. stable-diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml +85 -0
  10. stable-diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml +91 -0
  11. stable-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml +71 -0
  12. stable-diffusion/configs/retrieval-augmented-diffusion/768x768.yaml +68 -0
  13. stable-diffusion/configs/stable-diffusion/v1-inference.yaml +70 -0
  14. stable-diffusion/debug/generate.sh +14 -0
  15. stable-diffusion/debug/inverse.sh +13 -0
  16. stable-diffusion/debug/inverse_bip_ldm_laion.sh +13 -0
  17. stable-diffusion/debug/inverse_mb_ldm_laion.sh +13 -0
  18. stable-diffusion/ldm/__pycache__/util.cpython-38.pyc +0 -0
  19. stable-diffusion/ldm/__pycache__/util.cpython-39.pyc +0 -0
  20. stable-diffusion/ldm/data/__init__.py +0 -0
  21. stable-diffusion/ldm/data/base.py +23 -0
  22. stable-diffusion/ldm/data/imagenet.py +394 -0
  23. stable-diffusion/ldm/data/lsun.py +92 -0
  24. stable-diffusion/ldm/lr_scheduler.py +98 -0
  25. stable-diffusion/ldm/models/__pycache__/autoencoder.cpython-38.pyc +0 -0
  26. stable-diffusion/ldm/models/autoencoder.py +443 -0
  27. stable-diffusion/ldm/models/diffusion/__init__.py +0 -0
  28. stable-diffusion/ldm/models/diffusion/__pycache__/__init__.cpython-38.pyc +0 -0
  29. stable-diffusion/ldm/models/diffusion/__pycache__/__init__.cpython-39.pyc +0 -0
  30. stable-diffusion/ldm/models/diffusion/__pycache__/ddim.cpython-38.pyc +0 -0
  31. stable-diffusion/ldm/models/diffusion/__pycache__/ddim.cpython-39.pyc +0 -0
  32. stable-diffusion/ldm/models/diffusion/__pycache__/ddpm.cpython-38.pyc +0 -0
  33. stable-diffusion/ldm/models/diffusion/__pycache__/ddpm.cpython-39.pyc +0 -0
  34. stable-diffusion/ldm/models/diffusion/__pycache__/plms.cpython-38.pyc +0 -0
  35. stable-diffusion/ldm/models/diffusion/__pycache__/plms.cpython-39.pyc +0 -0
  36. stable-diffusion/ldm/models/diffusion/__pycache__/psld.cpython-38.pyc +0 -0
  37. stable-diffusion/ldm/models/diffusion/__pycache__/psld.cpython-39.pyc +0 -0
  38. stable-diffusion/ldm/models/diffusion/classifier.py +267 -0
  39. stable-diffusion/ldm/models/diffusion/ddim.py +241 -0
  40. stable-diffusion/ldm/models/diffusion/ddpm.py +1445 -0
  41. stable-diffusion/ldm/models/diffusion/dpm_solver/__init__.py +1 -0
  42. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-38.pyc +0 -0
  43. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-39.pyc +0 -0
  44. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-38.pyc +0 -0
  45. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-39.pyc +0 -0
  46. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-38.pyc +0 -0
  47. stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-39.pyc +0 -0
  48. stable-diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py +1184 -0
  49. stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py +82 -0
  50. stable-diffusion/ldm/models/diffusion/plms.py +236 -0
stable-diffusion/configs/autoencoder/autoencoder_kl_16x16x16.yaml ADDED
@@ -0,0 +1,54 @@
+ model:
+   base_learning_rate: 4.5e-6
+   target: ldm.models.autoencoder.AutoencoderKL
+   params:
+     monitor: "val/rec_loss"
+     embed_dim: 16
+     lossconfig:
+       target: ldm.modules.losses.LPIPSWithDiscriminator
+       params:
+         disc_start: 50001
+         kl_weight: 0.000001
+         disc_weight: 0.5
+
+     ddconfig:
+       double_z: True
+       z_channels: 16
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
+       num_res_blocks: 2
+       attn_resolutions: [16]
+       dropout: 0.0
+
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 12
+     wrap: True
+     train:
+       target: ldm.data.imagenet.ImageNetSRTrain
+       params:
+         size: 256
+         degradation: pil_nearest
+     validation:
+       target: ldm.data.imagenet.ImageNetSRValidation
+       params:
+         size: 256
+         degradation: pil_nearest
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 1000
+         max_images: 8
+         increase_log_steps: True
+
+   trainer:
+     benchmark: True
+     accumulate_grad_batches: 2
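
Note on the convention used by all of these configs: each `target:` entry names a Python class by dotted path, and the sibling `params:` mapping is passed to that class's constructor. The dispatch is performed by ldm.util.instantiate_from_config (imported by autoencoder.py later in this commit). A minimal sketch of that pattern, not the exact helper (the real one also special-cases sentinel strings such as __is_unconditional__):

import importlib

def get_obj_from_str(string):
    # "ldm.models.autoencoder.AutoencoderKL" -> the class object
    module, cls = string.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

def instantiate_from_config(config):
    # Build the object named by config["target"], forwarding config["params"].
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))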
stable-diffusion/configs/autoencoder/autoencoder_kl_32x32x4.yaml ADDED
@@ -0,0 +1,53 @@
+ model:
+   base_learning_rate: 4.5e-6
+   target: ldm.models.autoencoder.AutoencoderKL
+   params:
+     monitor: "val/rec_loss"
+     embed_dim: 4
+     lossconfig:
+       target: ldm.modules.losses.LPIPSWithDiscriminator
+       params:
+         disc_start: 50001
+         kl_weight: 0.000001
+         disc_weight: 0.5
+
+     ddconfig:
+       double_z: True
+       z_channels: 4
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
+       num_res_blocks: 2
+       attn_resolutions: [ ]
+       dropout: 0.0
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 12
+     wrap: True
+     train:
+       target: ldm.data.imagenet.ImageNetSRTrain
+       params:
+         size: 256
+         degradation: pil_nearest
+     validation:
+       target: ldm.data.imagenet.ImageNetSRValidation
+       params:
+         size: 256
+         degradation: pil_nearest
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 1000
+         max_images: 8
+         increase_log_steps: True
+
+   trainer:
+     benchmark: True
+     accumulate_grad_batches: 2
stable-diffusion/configs/autoencoder/autoencoder_kl_64x64x3.yaml ADDED
@@ -0,0 +1,54 @@
+ model:
+   base_learning_rate: 4.5e-6
+   target: ldm.models.autoencoder.AutoencoderKL
+   params:
+     monitor: "val/rec_loss"
+     embed_dim: 3
+     lossconfig:
+       target: ldm.modules.losses.LPIPSWithDiscriminator
+       params:
+         disc_start: 50001
+         kl_weight: 0.000001
+         disc_weight: 0.5
+
+     ddconfig:
+       double_z: True
+       z_channels: 3
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult: [ 1,2,4 ]  # num_down = len(ch_mult)-1
+       num_res_blocks: 2
+       attn_resolutions: [ ]
+       dropout: 0.0
+
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 12
+     wrap: True
+     train:
+       target: ldm.data.imagenet.ImageNetSRTrain
+       params:
+         size: 256
+         degradation: pil_nearest
+     validation:
+       target: ldm.data.imagenet.ImageNetSRValidation
+       params:
+         size: 256
+         degradation: pil_nearest
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 1000
+         max_images: 8
+         increase_log_steps: True
+
+   trainer:
+     benchmark: True
+     accumulate_grad_batches: 2
stable-diffusion/configs/autoencoder/autoencoder_kl_8x8x64.yaml ADDED
@@ -0,0 +1,53 @@
+ model:
+   base_learning_rate: 4.5e-6
+   target: ldm.models.autoencoder.AutoencoderKL
+   params:
+     monitor: "val/rec_loss"
+     embed_dim: 64
+     lossconfig:
+       target: ldm.modules.losses.LPIPSWithDiscriminator
+       params:
+         disc_start: 50001
+         kl_weight: 0.000001
+         disc_weight: 0.5
+
+     ddconfig:
+       double_z: True
+       z_channels: 64
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult: [ 1,1,2,2,4,4 ]  # num_down = len(ch_mult)-1
+       num_res_blocks: 2
+       attn_resolutions: [16,8]
+       dropout: 0.0
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 12
+     wrap: True
+     train:
+       target: ldm.data.imagenet.ImageNetSRTrain
+       params:
+         size: 256
+         degradation: pil_nearest
+     validation:
+       target: ldm.data.imagenet.ImageNetSRValidation
+       params:
+         size: 256
+         degradation: pil_nearest
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 1000
+         max_images: 8
+         increase_log_steps: True
+
+   trainer:
+     benchmark: True
+     accumulate_grad_batches: 2
stable-diffusion/configs/latent-diffusion/celebahq-ldm-vq-4.yaml ADDED
@@ -0,0 +1,86 @@
+ model:
+   base_learning_rate: 2.0e-06
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     image_size: 64
+     channels: 3
+     monitor: val/loss_simple_ema
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         in_channels: 3
+         out_channels: 3
+         model_channels: 224
+         attention_resolutions:
+         # note: this isn't actually the resolution but
+         # the downsampling factor, i.e. this corresponds to
+         # attention on spatial resolution 8,16,32, as the
+         # spatial resolution of the latents is 64 for f4
+         - 8
+         - 4
+         - 2
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 4
+         num_head_channels: 32
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         embed_dim: 3
+         n_embed: 8192
+         ckpt_path: models/first_stage_models/vq-f4/model.ckpt
+         ddconfig:
+           double_z: false
+           z_channels: 3
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config: __is_unconditional__
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 48
+     num_workers: 5
+     wrap: false
+     train:
+       target: taming.data.faceshq.CelebAHQTrain
+       params:
+         size: 256
+     validation:
+       target: taming.data.faceshq.CelebAHQValidation
+       params:
+         size: 256
+
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: False
+
+   trainer:
+     benchmark: True
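
The attention_resolutions comment above deserves a concrete reading: the listed values are downsampling factors relative to the latent, so with the 64x64 latent of the f4 first stage, factors 8, 4 and 2 place attention at 8x8, 16x16 and 32x32 feature maps. A quick check in Python:

latent_size = 64                 # f4 first stage: 256 / 4
for ds in [8, 4, 2]:             # attention_resolutions from this config
    print(f"factor {ds} -> attention at {latent_size // ds}x{latent_size // ds}")
# factor 8 -> attention at 8x8
# factor 4 -> attention at 16x16
# factor 2 -> attention at 32x32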
stable-diffusion/configs/latent-diffusion/cin-ldm-vq-f8.yaml ADDED
@@ -0,0 +1,98 @@
+ model:
+   base_learning_rate: 1.0e-06
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     cond_stage_key: class_label
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: true
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 4
+         out_channels: 4
+         model_channels: 256
+         attention_resolutions:
+         # note: this isn't actually the resolution but
+         # the downsampling factor, i.e. this corresponds to
+         # attention on spatial resolution 8,16,32, as the
+         # spatial resolution of the latents is 32 for f8
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         num_head_channels: 32
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 512
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         embed_dim: 4
+         n_embed: 16384
+         ckpt_path: configs/first_stage_models/vq-f8/model.yaml
+         ddconfig:
+           double_z: false
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions:
+           - 32
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.ClassEmbedder
+       params:
+         embed_dim: 512
+         key: class_label
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 64
+     num_workers: 12
+     wrap: false
+     train:
+       target: ldm.data.imagenet.ImageNetTrain
+       params:
+         config:
+           size: 256
+     validation:
+       target: ldm.data.imagenet.ImageNetValidation
+       params:
+         config:
+           size: 256
+
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: False
+
+   trainer:
+     benchmark: True
stable-diffusion/configs/latent-diffusion/cin256-v2.yaml ADDED
@@ -0,0 +1,68 @@
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     cond_stage_key: class_label
+     image_size: 64
+     channels: 3
+     cond_stage_trainable: true
+     conditioning_key: crossattn
+     monitor: val/loss
+     use_ema: False
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         in_channels: 3
+         out_channels: 3
+         model_channels: 192
+         attention_resolutions:
+         - 8
+         - 4
+         - 2
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 5
+         num_heads: 1
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 512
+
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         embed_dim: 3
+         n_embed: 8192
+         ddconfig:
+           double_z: false
+           z_channels: 3
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.ClassEmbedder
+       params:
+         n_classes: 1001
+         embed_dim: 512
+         key: class_label
@@ -0,0 +1,85 @@
 
+ model:
+   base_learning_rate: 2.0e-06
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     image_size: 64
+     channels: 3
+     monitor: val/loss_simple_ema
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         in_channels: 3
+         out_channels: 3
+         model_channels: 224
+         attention_resolutions:
+         # note: this isn't actually the resolution but
+         # the downsampling factor, i.e. this corresponds to
+         # attention on spatial resolution 8,16,32, as the
+         # spatial resolution of the latents is 64 for f4
+         - 8
+         - 4
+         - 2
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 4
+         num_head_channels: 32
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         embed_dim: 3
+         n_embed: 8192
+         ckpt_path: configs/first_stage_models/vq-f4/model.yaml
+         ddconfig:
+           double_z: false
+           z_channels: 3
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config: __is_unconditional__
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 42
+     num_workers: 5
+     wrap: false
+     train:
+       target: taming.data.faceshq.FFHQTrain
+       params:
+         size: 256
+     validation:
+       target: taming.data.faceshq.FFHQValidation
+       params:
+         size: 256
+
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: False
+
+   trainer:
+     benchmark: True
stable-diffusion/configs/latent-diffusion/lsun_bedrooms-ldm-vq-4.yaml ADDED
@@ -0,0 +1,85 @@
+ model:
+   base_learning_rate: 2.0e-06
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0195
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     image_size: 64
+     channels: 3
+     monitor: val/loss_simple_ema
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 64
+         in_channels: 3
+         out_channels: 3
+         model_channels: 224
+         attention_resolutions:
+         # note: this isn't actually the resolution but
+         # the downsampling factor, i.e. this corresponds to
+         # attention on spatial resolution 8,16,32, as the
+         # spatial resolution of the latents is 64 for f4
+         - 8
+         - 4
+         - 2
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 4
+         num_head_channels: 32
+     first_stage_config:
+       target: ldm.models.autoencoder.VQModelInterface
+       params:
+         ckpt_path: configs/first_stage_models/vq-f4/model.yaml
+         embed_dim: 3
+         n_embed: 8192
+         ddconfig:
+           double_z: false
+           z_channels: 3
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config: __is_unconditional__
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 48
+     num_workers: 5
+     wrap: false
+     train:
+       target: ldm.data.lsun.LSUNBedroomsTrain
+       params:
+         size: 256
+     validation:
+       target: ldm.data.lsun.LSUNBedroomsValidation
+       params:
+         size: 256
+
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: False
+
+   trainer:
+     benchmark: True
stable-diffusion/configs/latent-diffusion/lsun_churches-ldm-kl-8.yaml ADDED
@@ -0,0 +1,91 @@
+ model:
+   base_learning_rate: 5.0e-5   # set to target_lr by starting main.py with '--scale_lr False'
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.0155
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     loss_type: l1
+     first_stage_key: "image"
+     cond_stage_key: "image"
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: False
+     concat_mode: False
+     scale_by_std: True
+     monitor: 'val/loss_simple_ema'
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [10000]
+         cycle_lengths: [10000000000000]
+         f_start: [1.e-6]
+         f_max: [1.]
+         f_min: [ 1.]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 4
+         out_channels: 4
+         model_channels: 192
+         attention_resolutions: [ 1, 2, 4, 8 ]  # 32, 16, 8, 4
+         num_res_blocks: 2
+         channel_mult: [ 1,2,2,4,4 ]  # 32, 16, 8, 4, 2
+         num_heads: 8
+         use_scale_shift_norm: True
+         resblock_updown: True
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: "val/rec_loss"
+         ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
+         ddconfig:
+           double_z: True
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult: [ 1,2,4,4 ]  # num_down = len(ch_mult)-1
+           num_res_blocks: 2
+           attn_resolutions: [ ]
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config: "__is_unconditional__"
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 96
+     num_workers: 5
+     wrap: False
+     train:
+       target: ldm.data.lsun.LSUNChurchesTrain
+       params:
+         size: 256
+     validation:
+       target: ldm.data.lsun.LSUNChurchesValidation
+       params:
+         size: 256
+
+ lightning:
+   callbacks:
+     image_logger:
+       target: main.ImageLogger
+       params:
+         batch_frequency: 5000
+         max_images: 8
+         increase_log_steps: False
+
+
+   trainer:
+     benchmark: True
stable-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml ADDED
@@ -0,0 +1,71 @@
+ model:
+   base_learning_rate: 5.0e-05
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.012
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image
+     cond_stage_key: caption
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: true
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         - 4
+         num_heads: 8
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 1280
+         use_checkpoint: true
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.BERTEmbedder
+       params:
+         n_embed: 1280
+         n_layer: 32
stable-diffusion/configs/retrieval-augmented-diffusion/768x768.yaml ADDED
@@ -0,0 +1,68 @@
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.0015
+     linear_end: 0.015
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: jpg
+     cond_stage_key: nix
+     image_size: 48
+     channels: 16
+     cond_stage_trainable: false
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_by_std: false
+     scale_factor: 0.22765929
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 48
+         in_channels: 16
+         out_channels: 16
+         model_channels: 448
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 3
+         - 4
+         use_scale_shift_norm: false
+         resblock_updown: false
+         num_head_channels: 32
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: true
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         monitor: val/rec_loss
+         embed_dim: 16
+         ddconfig:
+           double_z: true
+           z_channels: 16
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 1
+           - 2
+           - 2
+           - 4
+           num_res_blocks: 2
+           attn_resolutions:
+           - 16
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: torch.nn.Identity
stable-diffusion/configs/stable-diffusion/v1-inference.yaml ADDED
@@ -0,0 +1,70 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
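
The scale_factor: 0.18215 here (and in txt2img-1p4B-eval.yaml above) rescales first-stage latents to roughly unit variance before diffusion; LatentDiffusion multiplies encoded latents by it and undoes it before decoding. A sketch of the round trip, assuming vae is an AutoencoderKL whose encode() returns a DiagonalGaussianDistribution, as in this commit's autoencoder.py:

import torch

scale_factor = 0.18215  # from the config

@torch.no_grad()
def to_latent(vae, image: torch.Tensor) -> torch.Tensor:
    # image: (B, 3, H, W) in [-1, 1] -> scaled latent (B, 4, H/8, W/8)
    posterior = vae.encode(image)
    return scale_factor * posterior.sample()

@torch.no_grad()
def from_latent(vae, z: torch.Tensor) -> torch.Tensor:
    # undo the scaling before decoding back to image space
    return vae.decode(z / scale_factor)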
stable-diffusion/debug/generate.sh ADDED
@@ -0,0 +1,14 @@
+ export CUDA_VISIBLE_DEVICES='2'
+ python scripts/inverse.py \
+     --file_id='00019.png' \
+     --task_config='configs/motion_deblur_config.yaml' \
+     --inpainting=0 \
+     --general_inverse=0 \
+     --gamma=1e-1 \
+     --omega=1e-1 \
+     --W=256 \
+     --H=256 \
+     --scale=5.0 \
+     --laion400m \
+     --prompt="a photograph of fantasy landscape trending in art station" \
+     --outdir="outputs/txt2img-samples-laion400m"
stable-diffusion/debug/inverse.sh ADDED
@@ -0,0 +1,13 @@
+ export CUDA_VISIBLE_DEVICES='1'
+ python scripts/inverse.py \
+     --file_id='00019.png' \
+     --task_config='configs/motion_deblur_config.yaml' \
+     --inpainting=0 \
+     --general_inverse=0 \
+     --gamma=1e-1 \
+     --omega=1e-1 \
+     --W=256 \
+     --H=256 \
+     --scale=5.0 \
+     --laion400m \
+     --prompt="a virus monster is playing guitar, oil on canvas"
stable-diffusion/debug/inverse_bip_ldm_laion.sh ADDED
@@ -0,0 +1,13 @@
+ export CUDA_VISIBLE_DEVICES='1'
+ python scripts/inverse.py \
+     --file_id='00019.png' \
+     --task_config='configs/box_inpainting_config.yaml' \
+     --inpainting=1 \
+     --general_inverse=0 \
+     --gamma=1e-1 \
+     --omega=1 \
+     --W=256 \
+     --H=256 \
+     --scale=5.0 \
+     --laion400m \
+     --outdir="outputs/psld-ldm-laion400m-bip"
stable-diffusion/debug/inverse_mb_ldm_laion.sh ADDED
@@ -0,0 +1,13 @@
+ export CUDA_VISIBLE_DEVICES='1'
+ python scripts/inverse.py \
+     --file_id='00019.png' \
+     --task_config='configs/motion_deblur_config.yaml' \
+     --inpainting=0 \
+     --general_inverse=1 \
+     --gamma=1e-1 \
+     --omega=1 \
+     --W=256 \
+     --H=256 \
+     --scale=5.0 \
+     --laion400m \
+     --outdir="outputs/psld-ldm-laion400m-mb"
stable-diffusion/ldm/__pycache__/util.cpython-38.pyc ADDED
Binary file (6.09 kB)
stable-diffusion/ldm/__pycache__/util.cpython-39.pyc ADDED
Binary file (6.12 kB)
stable-diffusion/ldm/data/__init__.py ADDED
File without changes
stable-diffusion/ldm/data/base.py ADDED
@@ -0,0 +1,23 @@
+ from abc import abstractmethod
+ from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
+
+
+ class Txt2ImgIterableBaseDataset(IterableDataset):
+     '''
+     Define an interface to make the IterableDatasets for text2img data chainable
+     '''
+     def __init__(self, num_records=0, valid_ids=None, size=256):
+         super().__init__()
+         self.num_records = num_records
+         self.valid_ids = valid_ids
+         self.sample_ids = valid_ids
+         self.size = size
+
+         print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')
+
+     def __len__(self):
+         return self.num_records
+
+     @abstractmethod
+     def __iter__(self):
+         pass
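
Txt2ImgIterableBaseDataset fixes only the constructor and __len__; concrete datasets must supply __iter__. A minimal sketch of a subclass, assuming the class above is importable (the record contents here are purely hypothetical):

class ToyTxt2ImgDataset(Txt2ImgIterableBaseDataset):
    # Hypothetical subclass: yields simple caption/size records.
    def __iter__(self):
        for i in range(self.num_records):
            yield {"caption": f"sample {i}", "size": self.size}

for record in ToyTxt2ImgDataset(num_records=2):
    print(record)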
stable-diffusion/ldm/data/imagenet.py ADDED
@@ -0,0 +1,394 @@
+ import os, yaml, pickle, shutil, tarfile, glob
+ import cv2
+ import albumentations
+ import PIL
+ import numpy as np
+ import torchvision.transforms.functional as TF
+ from omegaconf import OmegaConf
+ from functools import partial
+ from PIL import Image
+ from tqdm import tqdm
+ from torch.utils.data import Dataset, Subset
+
+ import taming.data.utils as tdu
+ from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
+ from taming.data.imagenet import ImagePaths
+
+ from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
+
+
+ def synset2idx(path_to_yaml="data/index_synset.yaml"):
+     with open(path_to_yaml) as f:
+         di2s = yaml.load(f, Loader=yaml.SafeLoader)
+     return dict((v, k) for k, v in di2s.items())
+
+
+ class ImageNetBase(Dataset):
+     def __init__(self, config=None):
+         self.config = config or OmegaConf.create()
+         if not type(self.config) == dict:
+             self.config = OmegaConf.to_container(self.config)
+         self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
+         self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
+         self._prepare()
+         self._prepare_synset_to_human()
+         self._prepare_idx_to_synset()
+         self._prepare_human_to_integer_label()
+         self._load()
+
+     def __len__(self):
+         return len(self.data)
+
+     def __getitem__(self, i):
+         return self.data[i]
+
+     def _prepare(self):
+         raise NotImplementedError()
+
+     def _filter_relpaths(self, relpaths):
+         ignore = set([
+             "n06596364_9591.JPEG",
+         ])
+         relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
+         if "sub_indices" in self.config:
+             indices = str_to_indices(self.config["sub_indices"])
+             synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
+             self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
+             files = []
+             for rpath in relpaths:
+                 syn = rpath.split("/")[0]
+                 if syn in synsets:
+                     files.append(rpath)
+             return files
+         else:
+             return relpaths
+
+     def _prepare_synset_to_human(self):
+         SIZE = 2655750
+         URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
+         self.human_dict = os.path.join(self.root, "synset_human.txt")
+         if (not os.path.exists(self.human_dict) or
+                 not os.path.getsize(self.human_dict) == SIZE):
+             download(URL, self.human_dict)
+
+     def _prepare_idx_to_synset(self):
+         URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
+         self.idx2syn = os.path.join(self.root, "index_synset.yaml")
+         if not os.path.exists(self.idx2syn):
+             download(URL, self.idx2syn)
+
+     def _prepare_human_to_integer_label(self):
+         URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
+         self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
+         if not os.path.exists(self.human2integer):
+             download(URL, self.human2integer)
+         with open(self.human2integer, "r") as f:
+             lines = f.read().splitlines()
+             assert len(lines) == 1000
+             self.human2integer_dict = dict()
+             for line in lines:
+                 value, key = line.split(":")
+                 self.human2integer_dict[key] = int(value)
+
+     def _load(self):
+         with open(self.txt_filelist, "r") as f:
+             self.relpaths = f.read().splitlines()
+             l1 = len(self.relpaths)
+             self.relpaths = self._filter_relpaths(self.relpaths)
+             print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
+
+         self.synsets = [p.split("/")[0] for p in self.relpaths]
+         self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
+
+         unique_synsets = np.unique(self.synsets)
+         class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
+         if not self.keep_orig_class_label:
+             self.class_labels = [class_dict[s] for s in self.synsets]
+         else:
+             self.class_labels = [self.synset2idx[s] for s in self.synsets]
+
+         with open(self.human_dict, "r") as f:
+             human_dict = f.read().splitlines()
+             human_dict = dict(line.split(maxsplit=1) for line in human_dict)
+
+         self.human_labels = [human_dict[s] for s in self.synsets]
+
+         labels = {
+             "relpath": np.array(self.relpaths),
+             "synsets": np.array(self.synsets),
+             "class_label": np.array(self.class_labels),
+             "human_label": np.array(self.human_labels),
+         }
+
+         if self.process_images:
+             self.size = retrieve(self.config, "size", default=256)
+             self.data = ImagePaths(self.abspaths,
+                                    labels=labels,
+                                    size=self.size,
+                                    random_crop=self.random_crop,
+                                    )
+         else:
+             self.data = self.abspaths
+
+
+ class ImageNetTrain(ImageNetBase):
+     NAME = "ILSVRC2012_train"
+     URL = "http://www.image-net.org/challenges/LSVRC/2012/"
+     AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
+     FILES = [
+         "ILSVRC2012_img_train.tar",
+     ]
+     SIZES = [
+         147897477120,
+     ]
+
+     def __init__(self, process_images=True, data_root=None, **kwargs):
+         self.process_images = process_images
+         self.data_root = data_root
+         super().__init__(**kwargs)
+
+     def _prepare(self):
+         if self.data_root:
+             self.root = os.path.join(self.data_root, self.NAME)
+         else:
+             cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
+             self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
+
+         self.datadir = os.path.join(self.root, "data")
+         self.txt_filelist = os.path.join(self.root, "filelist.txt")
+         self.expected_length = 1281167
+         self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
+                                     default=True)
+         if not tdu.is_prepared(self.root):
+             # prep
+             print("Preparing dataset {} in {}".format(self.NAME, self.root))
+
+             datadir = self.datadir
+             if not os.path.exists(datadir):
+                 path = os.path.join(self.root, self.FILES[0])
+                 if not os.path.exists(path) or not os.path.getsize(path) == self.SIZES[0]:
+                     import academictorrents as at
+                     atpath = at.get(self.AT_HASH, datastore=self.root)
+                     assert atpath == path
+
+                 print("Extracting {} to {}".format(path, datadir))
+                 os.makedirs(datadir, exist_ok=True)
+                 with tarfile.open(path, "r:") as tar:
+                     tar.extractall(path=datadir)
+
+                 print("Extracting sub-tars.")
+                 subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
+                 for subpath in tqdm(subpaths):
+                     subdir = subpath[:-len(".tar")]
+                     os.makedirs(subdir, exist_ok=True)
+                     with tarfile.open(subpath, "r:") as tar:
+                         tar.extractall(path=subdir)
+
+             filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
+             filelist = [os.path.relpath(p, start=datadir) for p in filelist]
+             filelist = sorted(filelist)
+             filelist = "\n".join(filelist) + "\n"
+             with open(self.txt_filelist, "w") as f:
+                 f.write(filelist)
+
+             tdu.mark_prepared(self.root)
+
+
+ class ImageNetValidation(ImageNetBase):
+     NAME = "ILSVRC2012_validation"
+     URL = "http://www.image-net.org/challenges/LSVRC/2012/"
+     AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
+     VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
+     FILES = [
+         "ILSVRC2012_img_val.tar",
+         "validation_synset.txt",
+     ]
+     SIZES = [
+         6744924160,
+         1950000,
+     ]
+
+     def __init__(self, process_images=True, data_root=None, **kwargs):
+         self.data_root = data_root
+         self.process_images = process_images
+         super().__init__(**kwargs)
+
+     def _prepare(self):
+         if self.data_root:
+             self.root = os.path.join(self.data_root, self.NAME)
+         else:
+             cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
+             self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
+         self.datadir = os.path.join(self.root, "data")
+         self.txt_filelist = os.path.join(self.root, "filelist.txt")
+         self.expected_length = 50000
+         self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
+                                     default=False)
+         if not tdu.is_prepared(self.root):
+             # prep
+             print("Preparing dataset {} in {}".format(self.NAME, self.root))
+
+             datadir = self.datadir
+             if not os.path.exists(datadir):
+                 path = os.path.join(self.root, self.FILES[0])
+                 if not os.path.exists(path) or not os.path.getsize(path) == self.SIZES[0]:
+                     import academictorrents as at
+                     atpath = at.get(self.AT_HASH, datastore=self.root)
+                     assert atpath == path
+
+                 print("Extracting {} to {}".format(path, datadir))
+                 os.makedirs(datadir, exist_ok=True)
+                 with tarfile.open(path, "r:") as tar:
+                     tar.extractall(path=datadir)
+
+                 vspath = os.path.join(self.root, self.FILES[1])
+                 if not os.path.exists(vspath) or not os.path.getsize(vspath) == self.SIZES[1]:
+                     download(self.VS_URL, vspath)
+
+                 with open(vspath, "r") as f:
+                     synset_dict = f.read().splitlines()
+                     synset_dict = dict(line.split() for line in synset_dict)
+
+                 print("Reorganizing into synset folders")
+                 synsets = np.unique(list(synset_dict.values()))
+                 for s in synsets:
+                     os.makedirs(os.path.join(datadir, s), exist_ok=True)
+                 for k, v in synset_dict.items():
+                     src = os.path.join(datadir, k)
+                     dst = os.path.join(datadir, v)
+                     shutil.move(src, dst)
+
+             filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
+             filelist = [os.path.relpath(p, start=datadir) for p in filelist]
+             filelist = sorted(filelist)
+             filelist = "\n".join(filelist) + "\n"
+             with open(self.txt_filelist, "w") as f:
+                 f.write(filelist)
+
+             tdu.mark_prepared(self.root)
+
+
+ class ImageNetSR(Dataset):
+     def __init__(self, size=None,
+                  degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
+                  random_crop=True):
+         """
+         Imagenet Superresolution Dataloader
+         Performs following ops in order:
+         1. crops a crop of size s from image either as random or center crop
+         2. resizes crop to size with cv2.area_interpolation
+         3. degrades resized crop with degradation_fn
+
+         :param size: resizing to size after cropping
+         :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
+         :param downscale_f: Low Resolution Downsample factor
+         :param min_crop_f: determines crop size s,
+           where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
+         :param max_crop_f: ""
+         :param data_root:
+         :param random_crop:
+         """
+         self.base = self.get_base()
+         assert size
+         assert (size / downscale_f).is_integer()
+         self.size = size
+         self.LR_size = int(size / downscale_f)
+         self.min_crop_f = min_crop_f
+         self.max_crop_f = max_crop_f
+         assert (max_crop_f <= 1.)
+         self.center_crop = not random_crop
+
+         self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
+
+         self.pil_interpolation = False  # gets reset later in case interp_op is from pillow
+
+         if degradation == "bsrgan":
+             self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
+
+         elif degradation == "bsrgan_light":
+             self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
+
+         else:
+             interpolation_fn = {
+                 "cv_nearest": cv2.INTER_NEAREST,
+                 "cv_bilinear": cv2.INTER_LINEAR,
+                 "cv_bicubic": cv2.INTER_CUBIC,
+                 "cv_area": cv2.INTER_AREA,
+                 "cv_lanczos": cv2.INTER_LANCZOS4,
+                 "pil_nearest": PIL.Image.NEAREST,
+                 "pil_bilinear": PIL.Image.BILINEAR,
+                 "pil_bicubic": PIL.Image.BICUBIC,
+                 "pil_box": PIL.Image.BOX,
+                 "pil_hamming": PIL.Image.HAMMING,
+                 "pil_lanczos": PIL.Image.LANCZOS,
+             }[degradation]
+
+             self.pil_interpolation = degradation.startswith("pil_")
+
+             if self.pil_interpolation:
+                 self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
+
+             else:
+                 self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
+                                                                           interpolation=interpolation_fn)
+
+     def __len__(self):
+         return len(self.base)
+
+     def __getitem__(self, i):
+         example = self.base[i]
+         image = Image.open(example["file_path_"])
+
+         if not image.mode == "RGB":
+             image = image.convert("RGB")
+
+         image = np.array(image).astype(np.uint8)
+
+         min_side_len = min(image.shape[:2])
+         crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
+         crop_side_len = int(crop_side_len)
+
+         if self.center_crop:
+             self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
+
+         else:
+             self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
+
+         image = self.cropper(image=image)["image"]
+         image = self.image_rescaler(image=image)["image"]
+
+         if self.pil_interpolation:
+             image_pil = PIL.Image.fromarray(image)
+             LR_image = self.degradation_process(image_pil)
+             LR_image = np.array(LR_image).astype(np.uint8)
+
+         else:
+             LR_image = self.degradation_process(image=image)["image"]
+
+         example["image"] = (image / 127.5 - 1.0).astype(np.float32)
+         example["LR_image"] = (LR_image / 127.5 - 1.0).astype(np.float32)
+
+         return example
+
+
+ class ImageNetSRTrain(ImageNetSR):
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def get_base(self):
+         with open("data/imagenet_train_hr_indices.p", "rb") as f:
+             indices = pickle.load(f)
+         dset = ImageNetTrain(process_images=False,)
+         return Subset(dset, indices)
+
+
+ class ImageNetSRValidation(ImageNetSR):
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def get_base(self):
+         with open("data/imagenet_val_hr_indices.p", "rb") as f:
+             indices = pickle.load(f)
+         dset = ImageNetValidation(process_images=False,)
+         return Subset(dset, indices)
@@ -0,0 +1,92 @@
 
+ import os
+ import numpy as np
+ import PIL
+ from PIL import Image
+ from torch.utils.data import Dataset
+ from torchvision import transforms
+
+
+ class LSUNBase(Dataset):
+     def __init__(self,
+                  txt_file,
+                  data_root,
+                  size=None,
+                  interpolation="bicubic",
+                  flip_p=0.5
+                  ):
+         self.data_paths = txt_file
+         self.data_root = data_root
+         with open(self.data_paths, "r") as f:
+             self.image_paths = f.read().splitlines()
+         self._length = len(self.image_paths)
+         self.labels = {
+             "relative_file_path_": [l for l in self.image_paths],
+             "file_path_": [os.path.join(self.data_root, l)
+                            for l in self.image_paths],
+         }
+
+         self.size = size
+         self.interpolation = {"linear": PIL.Image.LINEAR,
+                               "bilinear": PIL.Image.BILINEAR,
+                               "bicubic": PIL.Image.BICUBIC,
+                               "lanczos": PIL.Image.LANCZOS,
+                               }[interpolation]
+         self.flip = transforms.RandomHorizontalFlip(p=flip_p)
+
+     def __len__(self):
+         return self._length
+
+     def __getitem__(self, i):
+         example = dict((k, self.labels[k][i]) for k in self.labels)
+         image = Image.open(example["file_path_"])
+         if not image.mode == "RGB":
+             image = image.convert("RGB")
+
+         # default to score-sde preprocessing
+         img = np.array(image).astype(np.uint8)
+         crop = min(img.shape[0], img.shape[1])
+         h, w, = img.shape[0], img.shape[1]
+         img = img[(h - crop) // 2:(h + crop) // 2,
+                   (w - crop) // 2:(w + crop) // 2]
+
+         image = Image.fromarray(img)
+         if self.size is not None:
+             image = image.resize((self.size, self.size), resample=self.interpolation)
+
+         image = self.flip(image)
+         image = np.array(image).astype(np.uint8)
+         example["image"] = (image / 127.5 - 1.0).astype(np.float32)
+         return example
+
+
+ class LSUNChurchesTrain(LSUNBase):
+     def __init__(self, **kwargs):
+         super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs)
+
+
+ class LSUNChurchesValidation(LSUNBase):
+     def __init__(self, flip_p=0., **kwargs):
+         super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches",
+                          flip_p=flip_p, **kwargs)
+
+
+ class LSUNBedroomsTrain(LSUNBase):
+     def __init__(self, **kwargs):
+         super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs)
+
+
+ class LSUNBedroomsValidation(LSUNBase):
+     def __init__(self, flip_p=0.0, **kwargs):
+         super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms",
+                          flip_p=flip_p, **kwargs)
+
+
+ class LSUNCatsTrain(LSUNBase):
+     def __init__(self, **kwargs):
+         super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs)
+
+
+ class LSUNCatsValidation(LSUNBase):
+     def __init__(self, flip_p=0., **kwargs):
+         super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats",
+                          flip_p=flip_p, **kwargs)
@@ -0,0 +1,98 @@
 
+ import numpy as np
+
+
+ class LambdaWarmUpCosineScheduler:
+     """
+     note: use with a base_lr of 1.0
+     """
+     def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
+         self.lr_warm_up_steps = warm_up_steps
+         self.lr_start = lr_start
+         self.lr_min = lr_min
+         self.lr_max = lr_max
+         self.lr_max_decay_steps = max_decay_steps
+         self.last_lr = 0.
+         self.verbosity_interval = verbosity_interval
+
+     def schedule(self, n, **kwargs):
+         if self.verbosity_interval > 0:
+             if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
+         if n < self.lr_warm_up_steps:
+             lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
+             self.last_lr = lr
+             return lr
+         else:
+             t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
+             t = min(t, 1.0)
+             lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
+                     1 + np.cos(t * np.pi))
+             self.last_lr = lr
+             return lr
+
+     def __call__(self, n, **kwargs):
+         return self.schedule(n, **kwargs)
+
+
+ class LambdaWarmUpCosineScheduler2:
+     """
+     supports repeated iterations, configurable via lists
+     note: use with a base_lr of 1.0.
+     """
+     def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
+         assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
+         self.lr_warm_up_steps = warm_up_steps
+         self.f_start = f_start
+         self.f_min = f_min
+         self.f_max = f_max
+         self.cycle_lengths = cycle_lengths
+         self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
+         self.last_f = 0.
+         self.verbosity_interval = verbosity_interval
+
+     def find_in_interval(self, n):
+         interval = 0
+         for cl in self.cum_cycles[1:]:
+             if n <= cl:
+                 return interval
+             interval += 1
+
+     def schedule(self, n, **kwargs):
+         cycle = self.find_in_interval(n)
+         n = n - self.cum_cycles[cycle]
+         if self.verbosity_interval > 0:
+             if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
+                                                        f"current cycle {cycle}")
+         if n < self.lr_warm_up_steps[cycle]:
+             f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
+             self.last_f = f
+             return f
+         else:
+             t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
+             t = min(t, 1.0)
+             f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
+                     1 + np.cos(t * np.pi))
+             self.last_f = f
+             return f
+
+     def __call__(self, n, **kwargs):
+         return self.schedule(n, **kwargs)
+
+
+ class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
+
+     def schedule(self, n, **kwargs):
+         cycle = self.find_in_interval(n)
+         n = n - self.cum_cycles[cycle]
+         if self.verbosity_interval > 0:
+             if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
+                                                        f"current cycle {cycle}")
+
+         if n < self.lr_warm_up_steps[cycle]:
+             f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
+             self.last_f = f
+             return f
+         else:
+             f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
+             self.last_f = f
+             return f
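
As the docstrings note, these schedulers return a multiplier for a base learning rate of 1.0; this is how the scheduler_config blocks in lsun_churches-ldm-kl-8.yaml and v1-inference.yaml use LambdaLinearScheduler. A quick sanity check of the warm-up, assuming the repository root is on PYTHONPATH:

from ldm.lr_scheduler import LambdaLinearScheduler

sched = LambdaLinearScheduler(
    warm_up_steps=[10000], f_min=[1.0], f_max=[1.0],
    f_start=[1.0e-6], cycle_lengths=[10000000000000],
)
for step in [0, 5000, 10000]:
    print(step, sched(step))  # ~1e-6 at step 0, ~0.5 at 5000, 1.0 once warmed up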
stable-diffusion/ldm/models/__pycache__/autoencoder.cpython-38.pyc ADDED
Binary file (13.5 kB)
stable-diffusion/ldm/models/autoencoder.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pytorch_lightning as pl
3
+ import torch.nn.functional as F
4
+ from contextlib import contextmanager
5
+
6
+ from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
7
+
8
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
9
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
10
+
11
+ from ldm.util import instantiate_from_config
12
+
13
+
+ class VQModel(pl.LightningModule):
+     def __init__(self,
+                  ddconfig,
+                  lossconfig,
+                  n_embed,
+                  embed_dim,
+                  ckpt_path=None,
+                  ignore_keys=[],
+                  image_key="image",
+                  colorize_nlabels=None,
+                  monitor=None,
+                  batch_resize_range=None,
+                  scheduler_config=None,
+                  lr_g_factor=1.0,
+                  remap=None,
+                  sane_index_shape=False,  # tell vector quantizer to return indices as bhw
+                  use_ema=False
+                  ):
+         super().__init__()
+         self.embed_dim = embed_dim
+         self.n_embed = n_embed
+         self.image_key = image_key
+         self.encoder = Encoder(**ddconfig)
+         self.decoder = Decoder(**ddconfig)
+         self.loss = instantiate_from_config(lossconfig)
+         self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
+                                         remap=remap,
+                                         sane_index_shape=sane_index_shape)
+         self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
+         self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+         if colorize_nlabels is not None:
+             assert type(colorize_nlabels)==int
+             self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+         if monitor is not None:
+             self.monitor = monitor
+         self.batch_resize_range = batch_resize_range
+         if self.batch_resize_range is not None:
+             print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
+
+         self.use_ema = use_ema
+         if self.use_ema:
+             self.model_ema = LitEma(self)
+             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+         self.scheduler_config = scheduler_config
+         self.lr_g_factor = lr_g_factor
+
+     @contextmanager
+     def ema_scope(self, context=None):
+         if self.use_ema:
+             self.model_ema.store(self.parameters())
+             self.model_ema.copy_to(self)
+             if context is not None:
+                 print(f"{context}: Switched to EMA weights")
+         try:
+             yield None
+         finally:
+             if self.use_ema:
+                 self.model_ema.restore(self.parameters())
+                 if context is not None:
+                     print(f"{context}: Restored training weights")
+
+     def init_from_ckpt(self, path, ignore_keys=list()):
+         sd = torch.load(path, map_location="cpu")["state_dict"]
+         keys = list(sd.keys())
+         for k in keys:
+             for ik in ignore_keys:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del sd[k]
+         missing, unexpected = self.load_state_dict(sd, strict=False)
+         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+         if len(missing) > 0:
+             print(f"Missing Keys: {missing}")
+             print(f"Unexpected Keys: {unexpected}")
+
+     def on_train_batch_end(self, *args, **kwargs):
+         if self.use_ema:
+             self.model_ema(self)
+
+     def encode(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         quant, emb_loss, info = self.quantize(h)
+         return quant, emb_loss, info
+
+     def encode_to_prequant(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         return h
+
+     def decode(self, quant):
+         quant = self.post_quant_conv(quant)
+         dec = self.decoder(quant)
+         return dec
+
+     def decode_code(self, code_b):
+         quant_b = self.quantize.embed_code(code_b)
+         dec = self.decode(quant_b)
+         return dec
+
+     def forward(self, input, return_pred_indices=False):
+         quant, diff, (_, _, ind) = self.encode(input)
+         dec = self.decode(quant)
+         if return_pred_indices:
+             return dec, diff, ind
+         return dec, diff
+
+     def get_input(self, batch, k):
+         x = batch[k]
+         if len(x.shape) == 3:
+             x = x[..., None]
+         x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
+         if self.batch_resize_range is not None:
+             lower_size = self.batch_resize_range[0]
+             upper_size = self.batch_resize_range[1]
+             if self.global_step <= 4:
+                 # do the first few batches with max size to avoid later oom
+                 new_resize = upper_size
+             else:
+                 new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
+             if new_resize != x.shape[2]:
+                 x = F.interpolate(x, size=new_resize, mode="bicubic")
+             x = x.detach()
+         return x
+
+     def training_step(self, batch, batch_idx, optimizer_idx):
+         # https://github.com/pytorch/pytorch/issues/37142
+         # try not to fool the heuristics
+         x = self.get_input(batch, self.image_key)
+         xrec, qloss, ind = self(x, return_pred_indices=True)
+
+         if optimizer_idx == 0:
+             # autoencode
+             aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
+                                             last_layer=self.get_last_layer(), split="train",
+                                             predicted_indices=ind)
+
+             self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+             return aeloss
+
+         if optimizer_idx == 1:
+             # discriminator
+             discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
+                                                 last_layer=self.get_last_layer(), split="train")
+             self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+             return discloss
+
+     def validation_step(self, batch, batch_idx):
+         log_dict = self._validation_step(batch, batch_idx)
+         with self.ema_scope():
+             log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+         return log_dict
+
+     def _validation_step(self, batch, batch_idx, suffix=""):
+         x = self.get_input(batch, self.image_key)
+         xrec, qloss, ind = self(x, return_pred_indices=True)
+         aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
+                                         self.global_step,
+                                         last_layer=self.get_last_layer(),
+                                         split="val"+suffix,
+                                         predicted_indices=ind
+                                         )
+
+         discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
+                                             self.global_step,
+                                             last_layer=self.get_last_layer(),
+                                             split="val"+suffix,
+                                             predicted_indices=ind
+                                             )
+         rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
+         self.log(f"val{suffix}/rec_loss", rec_loss,
+                  prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
+         self.log(f"val{suffix}/aeloss", aeloss,
+                  prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
+         if version.parse(pl.__version__) >= version.parse('1.4.0'):
+             del log_dict_ae[f"val{suffix}/rec_loss"]
+         self.log_dict(log_dict_ae)
+         self.log_dict(log_dict_disc)
+         return self.log_dict
+
+     def configure_optimizers(self):
+         lr_d = self.learning_rate
+         lr_g = self.lr_g_factor*self.learning_rate
+         print("lr_d", lr_d)
+         print("lr_g", lr_g)
+         opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
+                                   list(self.decoder.parameters())+
+                                   list(self.quantize.parameters())+
+                                   list(self.quant_conv.parameters())+
+                                   list(self.post_quant_conv.parameters()),
+                                   lr=lr_g, betas=(0.5, 0.9))
+         opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
+                                     lr=lr_d, betas=(0.5, 0.9))
+
+         if self.scheduler_config is not None:
+             scheduler = instantiate_from_config(self.scheduler_config)
+
+             print("Setting up LambdaLR scheduler...")
+             scheduler = [
+                 {
+                     'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
+                     'interval': 'step',
+                     'frequency': 1
+                 },
+                 {
+                     'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
+                     'interval': 'step',
+                     'frequency': 1
+                 },
+             ]
+             return [opt_ae, opt_disc], scheduler
+         return [opt_ae, opt_disc], []
+
+     def get_last_layer(self):
+         return self.decoder.conv_out.weight
+
+     def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
+         log = dict()
+         x = self.get_input(batch, self.image_key)
+         x = x.to(self.device)
+         if only_inputs:
+             log["inputs"] = x
+             return log
+         xrec, _ = self(x)
+         if x.shape[1] > 3:
+             # colorize with random projection
+             assert xrec.shape[1] > 3
+             x = self.to_rgb(x)
+             xrec = self.to_rgb(xrec)
+         log["inputs"] = x
+         log["reconstructions"] = xrec
+         if plot_ema:
+             with self.ema_scope():
+                 xrec_ema, _ = self(x)
+                 if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
+                 log["reconstructions_ema"] = xrec_ema
+         return log
+
+     def to_rgb(self, x):
+         assert self.image_key == "segmentation"
+         if not hasattr(self, "colorize"):
+             self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
+         x = F.conv2d(x, weight=self.colorize)
+         x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
+         return x
+
+
+ class VQModelInterface(VQModel):
+     def __init__(self, embed_dim, *args, **kwargs):
+         super().__init__(embed_dim=embed_dim, *args, **kwargs)
+         self.embed_dim = embed_dim
+
+     def encode(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         return h
+
+     def decode(self, h, force_not_quantize=False):
+         # also go through quantization layer
+         if not force_not_quantize:
+             quant, emb_loss, info = self.quantize(h)
+         else:
+             quant = h
+         quant = self.post_quant_conv(quant)
+         dec = self.decoder(quant)
+         return dec
+
+
+ class AutoencoderKL(pl.LightningModule):
+     def __init__(self,
+                  ddconfig,
+                  lossconfig,
+                  embed_dim,
+                  ckpt_path=None,
+                  ignore_keys=[],
+                  image_key="image",
+                  colorize_nlabels=None,
+                  monitor=None,
+                  ):
+         super().__init__()
+         self.image_key = image_key
+         self.encoder = Encoder(**ddconfig)
+         self.decoder = Decoder(**ddconfig)
+         self.loss = instantiate_from_config(lossconfig)
+         assert ddconfig["double_z"]
+         self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
+         self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+         self.embed_dim = embed_dim
+         if colorize_nlabels is not None:
+             assert type(colorize_nlabels)==int
+             self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+         if monitor is not None:
+             self.monitor = monitor
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+
+     def init_from_ckpt(self, path, ignore_keys=list()):
+         sd = torch.load(path, map_location="cpu")["state_dict"]
+         keys = list(sd.keys())
+         for k in keys:
+             for ik in ignore_keys:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del sd[k]
+         self.load_state_dict(sd, strict=False)
+         print(f"Restored from {path}")
+
+     def encode(self, x):
+         h = self.encoder(x)
+         moments = self.quant_conv(h)
+         posterior = DiagonalGaussianDistribution(moments)
+         return posterior
+
+     def decode(self, z):
+         z = self.post_quant_conv(z)
+         dec = self.decoder(z)
+         return dec
+
+     def forward(self, input, sample_posterior=True):
+         posterior = self.encode(input)
+         if sample_posterior:
+             z = posterior.sample()
+         else:
+             z = posterior.mode()
+         dec = self.decode(z)
+         return dec, posterior
+
+     def get_input(self, batch, k):
+         x = batch[k]
+         if len(x.shape) == 3:
+             x = x[..., None]
+         x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
+         return x
+
+     def training_step(self, batch, batch_idx, optimizer_idx):
+         inputs = self.get_input(batch, self.image_key)
+         reconstructions, posterior = self(inputs)
+
+         if optimizer_idx == 0:
+             # train encoder+decoder+logvar
+             aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
+                                             last_layer=self.get_last_layer(), split="train")
+             self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
+             self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
+             return aeloss
+
+         if optimizer_idx == 1:
+             # train the discriminator
+             discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
+                                                 last_layer=self.get_last_layer(), split="train")
+
+             self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
+             self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
+             return discloss
+
+     def validation_step(self, batch, batch_idx):
+         inputs = self.get_input(batch, self.image_key)
+         reconstructions, posterior = self(inputs)
+         aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
+                                         last_layer=self.get_last_layer(), split="val")
+
+         discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
+                                             last_layer=self.get_last_layer(), split="val")
+
+         self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
+         self.log_dict(log_dict_ae)
+         self.log_dict(log_dict_disc)
+         return self.log_dict
+
+     def configure_optimizers(self):
+         lr = self.learning_rate
+         opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
+                                   list(self.decoder.parameters())+
+                                   list(self.quant_conv.parameters())+
+                                   list(self.post_quant_conv.parameters()),
+                                   lr=lr, betas=(0.5, 0.9))
+         opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
+                                     lr=lr, betas=(0.5, 0.9))
+         return [opt_ae, opt_disc], []
+
+     def get_last_layer(self):
+         return self.decoder.conv_out.weight
+
+     @torch.no_grad()
+     def log_images(self, batch, only_inputs=False, **kwargs):
+         log = dict()
+         x = self.get_input(batch, self.image_key)
+         x = x.to(self.device)
+         if not only_inputs:
+             xrec, posterior = self(x)
+             if x.shape[1] > 3:
+                 # colorize with random projection
+                 assert xrec.shape[1] > 3
+                 x = self.to_rgb(x)
+                 xrec = self.to_rgb(xrec)
+             log["samples"] = self.decode(torch.randn_like(posterior.sample()))
+             log["reconstructions"] = xrec
+         log["inputs"] = x
+         return log
+
+     def to_rgb(self, x):
+         assert self.image_key == "segmentation"
+         if not hasattr(self, "colorize"):
+             self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
+         x = F.conv2d(x, weight=self.colorize)
+         x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
+         return x
+
+
+ class IdentityFirstStage(torch.nn.Module):
+     def __init__(self, *args, vq_interface=False, **kwargs):
+         super().__init__()  # initialize nn.Module before assigning attributes
+         self.vq_interface = vq_interface  # TODO: Should be true by default but check to not break older stuff
+
+     def encode(self, x, *args, **kwargs):
+         return x
+
+     def decode(self, x, *args, **kwargs):
+         return x
+
+     def quantize(self, x, *args, **kwargs):
+         if self.vq_interface:
+             return x, None, [None, None, None]
+         return x
+
+     def forward(self, x, *args, **kwargs):
+         return x
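
For orientation, a minimal round-trip through the `AutoencoderKL` above (illustrative, not part of this commit). The `ddconfig` values mirror `configs/autoencoder/autoencoder_kl_32x32x4.yaml`; the loss is stubbed out with `nn.Identity` since only `encode`/`decode` are exercised here:

```python
import torch
from ldm.models.autoencoder import AutoencoderKL

# ddconfig mirrors autoencoder_kl_32x32x4.yaml; the Identity loss is a stand-in
# so the module can be built without the adversarial loss dependencies.
ddconfig = dict(double_z=True, z_channels=4, resolution=256, in_channels=3,
                out_ch=3, ch=128, ch_mult=[1, 2, 4, 4], num_res_blocks=2,
                attn_resolutions=[], dropout=0.0)
ae = AutoencoderKL(ddconfig, lossconfig={"target": "torch.nn.Identity"}, embed_dim=4).eval()

x = torch.randn(1, 3, 256, 256)      # dummy image batch in [-1, 1]
with torch.no_grad():
    posterior = ae.encode(x)         # DiagonalGaussianDistribution
    z = posterior.sample()           # 1 x 4 x 32 x 32 latent
    x_rec = ae.decode(z)             # back to 1 x 3 x 256 x 256
print(z.shape, x_rec.shape)
```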
stable-diffusion/ldm/models/diffusion/__init__.py ADDED
File without changes
stable-diffusion/ldm/models/diffusion/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (169 Bytes).
stable-diffusion/ldm/models/diffusion/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (169 Bytes).
stable-diffusion/ldm/models/diffusion/__pycache__/ddim.cpython-38.pyc ADDED
Binary file (7.18 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/ddim.cpython-39.pyc ADDED
Binary file (7.12 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/ddpm.cpython-38.pyc ADDED
Binary file (43.8 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/ddpm.cpython-39.pyc ADDED
Binary file (43.8 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/plms.cpython-38.pyc ADDED
Binary file (7.37 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/plms.cpython-39.pyc ADDED
Binary file (7.31 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/psld.cpython-38.pyc ADDED
Binary file (9.8 kB).
stable-diffusion/ldm/models/diffusion/__pycache__/psld.cpython-39.pyc ADDED
Binary file (9.79 kB).
stable-diffusion/ldm/models/diffusion/classifier.py ADDED
@@ -0,0 +1,267 @@
+ import os
+ import torch
+ import pytorch_lightning as pl
+ from omegaconf import OmegaConf
+ from torch.nn import functional as F
+ from torch.optim import AdamW
+ from torch.optim.lr_scheduler import LambdaLR
+ from copy import deepcopy
+ from einops import rearrange
+ from glob import glob
+ from natsort import natsorted
+
+ from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
+ from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
+
+ __models__ = {
+     'class_label': EncoderUNetModel,
+     'segmentation': UNetModel
+ }
+
+
+ def disabled_train(self, mode=True):
+     """Overwrite model.train with this function to make sure train/eval mode
+     does not change anymore."""
+     return self
+
+
+ class NoisyLatentImageClassifier(pl.LightningModule):
+
+     def __init__(self,
+                  diffusion_path,
+                  num_classes,
+                  ckpt_path=None,
+                  pool='attention',
+                  label_key=None,
+                  diffusion_ckpt_path=None,
+                  scheduler_config=None,
+                  weight_decay=1.e-2,
+                  log_steps=10,
+                  monitor='val/loss',
+                  *args,
+                  **kwargs):
+         super().__init__(*args, **kwargs)
+         self.num_classes = num_classes
+         # get latest config of diffusion model
+         diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
+         self.diffusion_config = OmegaConf.load(diffusion_config).model
+         self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
+         self.load_diffusion()
+
+         self.monitor = monitor
+         self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
+         self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
+         self.log_steps = log_steps
+
+         self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
+             else self.diffusion_model.cond_stage_key
+
+         assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
+
+         if self.label_key not in __models__:
+             raise NotImplementedError()
+
+         self.load_classifier(ckpt_path, pool)
+
+         self.scheduler_config = scheduler_config
+         self.use_scheduler = self.scheduler_config is not None
+         self.weight_decay = weight_decay
+
+     def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+         sd = torch.load(path, map_location="cpu")
+         if "state_dict" in list(sd.keys()):
+             sd = sd["state_dict"]
+         keys = list(sd.keys())
+         for k in keys:
+             for ik in ignore_keys:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del sd[k]
+         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
+             sd, strict=False)
+         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+         if len(missing) > 0:
+             print(f"Missing Keys: {missing}")
+         if len(unexpected) > 0:
+             print(f"Unexpected Keys: {unexpected}")
+
+     def load_diffusion(self):
+         model = instantiate_from_config(self.diffusion_config)
+         self.diffusion_model = model.eval()
+         self.diffusion_model.train = disabled_train
+         for param in self.diffusion_model.parameters():
+             param.requires_grad = False
+
+     def load_classifier(self, ckpt_path, pool):
+         model_config = deepcopy(self.diffusion_config.params.unet_config.params)
+         model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
+         model_config.out_channels = self.num_classes
+         if self.label_key == 'class_label':
+             model_config.pool = pool
+
+         self.model = __models__[self.label_key](**model_config)
+         if ckpt_path is not None:
+             print('#####################################################################')
+             print(f'load from ckpt "{ckpt_path}"')
+             print('#####################################################################')
+             self.init_from_ckpt(ckpt_path)
+
+     @torch.no_grad()
+     def get_x_noisy(self, x, t, noise=None):
+         noise = default(noise, lambda: torch.randn_like(x))
+         continuous_sqrt_alpha_cumprod = None
+         if self.diffusion_model.use_continuous_noise:
+             continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
+             # todo: make sure t+1 is correct here
+
+         return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
+                                              continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)
+
+     def forward(self, x_noisy, t, *args, **kwargs):
+         return self.model(x_noisy, t)
+
+     @torch.no_grad()
+     def get_input(self, batch, k):
+         x = batch[k]
+         if len(x.shape) == 3:
+             x = x[..., None]
+         x = rearrange(x, 'b h w c -> b c h w')
+         x = x.to(memory_format=torch.contiguous_format).float()
+         return x
+
+     @torch.no_grad()
+     def get_conditioning(self, batch, k=None):
+         if k is None:
+             k = self.label_key
+         assert k is not None, 'Needs to provide label key'
+
+         targets = batch[k].to(self.device)
+
+         if self.label_key == 'segmentation':
+             targets = rearrange(targets, 'b h w c -> b c h w')
+             for down in range(self.numd):
+                 h, w = targets.shape[-2:]
+                 targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
+
+             # targets = rearrange(targets,'b c h w -> b h w c')
+
+         return targets
+
+     def compute_top_k(self, logits, labels, k, reduction="mean"):
+         _, top_ks = torch.topk(logits, k, dim=1)
+         if reduction == "mean":
+             return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
+         elif reduction == "none":
+             return (top_ks == labels[:, None]).float().sum(dim=-1)
+
+     def on_train_epoch_start(self):
+         # save some memory
+         self.diffusion_model.model.to('cpu')
+
+     @torch.no_grad()
+     def write_logs(self, loss, logits, targets):
+         log_prefix = 'train' if self.training else 'val'
+         log = {}
+         log[f"{log_prefix}/loss"] = loss.mean()
+         log[f"{log_prefix}/acc@1"] = self.compute_top_k(
+             logits, targets, k=1, reduction="mean"
+         )
+         log[f"{log_prefix}/acc@5"] = self.compute_top_k(
+             logits, targets, k=5, reduction="mean"
+         )
+
+         self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
+         self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
+         self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
+         lr = self.optimizers().param_groups[0]['lr']
+         self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
+
+     def shared_step(self, batch, t=None):
+         x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
+         targets = self.get_conditioning(batch)
+         if targets.dim() == 4:
+             targets = targets.argmax(dim=1)
+         if t is None:
+             t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
+         else:
+             t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
+         x_noisy = self.get_x_noisy(x, t)
+         logits = self(x_noisy, t)
+
+         loss = F.cross_entropy(logits, targets, reduction='none')
+
+         self.write_logs(loss.detach(), logits.detach(), targets.detach())
+
+         loss = loss.mean()
+         return loss, logits, x_noisy, targets
+
+     def training_step(self, batch, batch_idx):
+         loss, *_ = self.shared_step(batch)
+         return loss
+
+     def reset_noise_accs(self):
+         self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
+                           range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}
+
+     def on_validation_start(self):
+         self.reset_noise_accs()
+
+     @torch.no_grad()
+     def validation_step(self, batch, batch_idx):
+         loss, *_ = self.shared_step(batch)
+
+         for t in self.noisy_acc:
+             _, logits, _, targets = self.shared_step(batch, t)
+             self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
+             self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
+
+         return loss
+
+     def configure_optimizers(self):
+         optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
+
+         if self.use_scheduler:
+             scheduler = instantiate_from_config(self.scheduler_config)
+
+             print("Setting up LambdaLR scheduler...")
+             scheduler = [
+                 {
+                     'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
+                     'interval': 'step',
+                     'frequency': 1
+                 }]
+             return [optimizer], scheduler
+
+         return optimizer
+
+     @torch.no_grad()
+     def log_images(self, batch, N=8, *args, **kwargs):
+         log = dict()
+         x = self.get_input(batch, self.diffusion_model.first_stage_key)
+         log['inputs'] = x
+
+         y = self.get_conditioning(batch)
+
+         if self.label_key == 'class_label':
+             y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
+             log['labels'] = y
+
+         if ismap(y):
+             log['labels'] = self.diffusion_model.to_rgb(y)
+
+             for step in range(self.log_steps):
+                 current_time = step * self.log_time_interval
+
+                 _, logits, x_noisy, _ = self.shared_step(batch, t=current_time)
+
+                 log[f'inputs@t{current_time}'] = x_noisy
+
+                 pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
+                 pred = rearrange(pred, 'b h w c -> b c h w')
+
+                 log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)
+
+         for key in log:
+             log[key] = log[key][:N]
+
+         return log
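
The accuracy bookkeeping in `compute_top_k` above reduces to a single `torch.topk` comparison; a self-contained sketch of the same logic (illustrative, not part of this commit):

```python
import torch

logits = torch.randn(4, 10)                 # batch of 4 over 10 classes
labels = torch.randint(0, 10, (4,))
_, top_ks = torch.topk(logits, k=5, dim=1)  # indices of the 5 largest logits per row
# each row contributes 1.0 if the true label appears among its top-k predictions
acc_at_5 = (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
print(f"acc@5 = {acc_at_5:.2f}")
```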
stable-diffusion/ldm/models/diffusion/ddim.py ADDED
@@ -0,0 +1,241 @@
+ """SAMPLING ONLY."""
+
+ import torch
+ import numpy as np
+ from tqdm import tqdm
+ from functools import partial
+
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \
+     extract_into_tensor
+
+
+ class DDIMSampler(object):
+     def __init__(self, model, schedule="linear", **kwargs):
+         super().__init__()
+         self.model = model
+         self.ddpm_num_timesteps = model.num_timesteps
+         self.schedule = schedule
+
+     def register_buffer(self, name, attr):
+         if type(attr) == torch.Tensor:
+             if attr.device != torch.device("cuda"):
+                 attr = attr.to(torch.device("cuda"))
+         setattr(self, name, attr)
+
+     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
+         self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
+                                                   num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
+         alphas_cumprod = self.model.alphas_cumprod
+         assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
+         to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
+
+         self.register_buffer('betas', to_torch(self.model.betas))
+         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+         self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
+
+         # calculations for diffusion q(x_t | x_{t-1}) and others
+         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
+         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
+         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
+         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
+         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
+
+         # ddim sampling parameters
+         ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
+                                                                                    ddim_timesteps=self.ddim_timesteps,
+                                                                                    eta=ddim_eta, verbose=verbose)
+         self.register_buffer('ddim_sigmas', ddim_sigmas)
+         self.register_buffer('ddim_alphas', ddim_alphas)
+         self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
+         self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
+         sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
+             (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
+                     1 - self.alphas_cumprod / self.alphas_cumprod_prev))
+         self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
+
+     @torch.no_grad()
+     def sample(self,
+                S,
+                batch_size,
+                shape,
+                conditioning=None,
+                callback=None,
+                normals_sequence=None,
+                img_callback=None,
+                quantize_x0=False,
+                eta=0.,
+                mask=None,
+                x0=None,
+                temperature=1.,
+                noise_dropout=0.,
+                score_corrector=None,
+                corrector_kwargs=None,
+                verbose=True,
+                x_T=None,
+                log_every_t=100,
+                unconditional_guidance_scale=1.,
+                unconditional_conditioning=None,
+                # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
+                **kwargs
+                ):
+         if conditioning is not None:
+             if isinstance(conditioning, dict):
+                 cbs = conditioning[list(conditioning.keys())[0]].shape[0]
+                 if cbs != batch_size:
+                     print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
+             else:
+                 if conditioning.shape[0] != batch_size:
+                     print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
+
+         self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
+         # sampling
+         C, H, W = shape
+         size = (batch_size, C, H, W)
+         print(f'Data shape for DDIM sampling is {size}, eta {eta}')
+
+         samples, intermediates = self.ddim_sampling(conditioning, size,
+                                                     callback=callback,
+                                                     img_callback=img_callback,
+                                                     quantize_denoised=quantize_x0,
+                                                     mask=mask, x0=x0,
+                                                     ddim_use_original_steps=False,
+                                                     noise_dropout=noise_dropout,
+                                                     temperature=temperature,
+                                                     score_corrector=score_corrector,
+                                                     corrector_kwargs=corrector_kwargs,
+                                                     x_T=x_T,
+                                                     log_every_t=log_every_t,
+                                                     unconditional_guidance_scale=unconditional_guidance_scale,
+                                                     unconditional_conditioning=unconditional_conditioning,
+                                                     )
+         return samples, intermediates
+
+     @torch.no_grad()
+     def ddim_sampling(self, cond, shape,
+                       x_T=None, ddim_use_original_steps=False,
+                       callback=None, timesteps=None, quantize_denoised=False,
+                       mask=None, x0=None, img_callback=None, log_every_t=100,
+                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
+                       unconditional_guidance_scale=1., unconditional_conditioning=None,):
+         device = self.model.betas.device
+         b = shape[0]
+         if x_T is None:
+             img = torch.randn(shape, device=device)
+         else:
+             img = x_T
+
+         if timesteps is None:
+             timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
+         elif timesteps is not None and not ddim_use_original_steps:
+             subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
+             timesteps = self.ddim_timesteps[:subset_end]
+
+         intermediates = {'x_inter': [img], 'pred_x0': [img]}
+         time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
+         total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
+         print(f"Running DDIM Sampling with {total_steps} timesteps")
+
+         iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
+
+         for i, step in enumerate(iterator):
+             index = total_steps - i - 1
+             ts = torch.full((b,), step, device=device, dtype=torch.long)
+
+             if mask is not None:
+                 assert x0 is not None
+                 img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
+                 img = img_orig * mask + (1. - mask) * img
+
+             outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
+                                       quantize_denoised=quantize_denoised, temperature=temperature,
+                                       noise_dropout=noise_dropout, score_corrector=score_corrector,
+                                       corrector_kwargs=corrector_kwargs,
+                                       unconditional_guidance_scale=unconditional_guidance_scale,
+                                       unconditional_conditioning=unconditional_conditioning)
+             img, pred_x0 = outs
+             if callback: callback(i)
+             if img_callback: img_callback(pred_x0, i)
+
+             if index % log_every_t == 0 or index == total_steps - 1:
+                 intermediates['x_inter'].append(img)
+                 intermediates['pred_x0'].append(pred_x0)
+
+         return img, intermediates
+
+     @torch.no_grad()
+     def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
+                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
+                       unconditional_guidance_scale=1., unconditional_conditioning=None):
+         b, *_, device = *x.shape, x.device
+
+         if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
+             e_t = self.model.apply_model(x, t, c)
+         else:
+             x_in = torch.cat([x] * 2)
+             t_in = torch.cat([t] * 2)
+             c_in = torch.cat([unconditional_conditioning, c])
+             e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
+             e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
+
+         if score_corrector is not None:
+             assert self.model.parameterization == "eps"
+             e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
+
+         alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
+         alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
+         sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
+         sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
+         # select parameters corresponding to the currently considered timestep
+         a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
+         a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
+         sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
+         sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
+
+         # current prediction for x_0
+         pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+         if quantize_denoised:
+             pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
+         # direction pointing to x_t
+         dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
+         noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+         if noise_dropout > 0.:
+             noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+         x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
+         return x_prev, pred_x0
+
+     @torch.no_grad()
+     def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
+         # fast, but does not allow for exact reconstruction
+         # t serves as an index to gather the correct alphas
+         if use_original_steps:
+             sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
+             sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
+         else:
+             sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
+             sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
+
+         if noise is None:
+             noise = torch.randn_like(x0)
+         return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
+                 extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
+
+     @torch.no_grad()
+     def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
+                use_original_steps=False):
+
+         timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
+         timesteps = timesteps[:t_start]
+
+         time_range = np.flip(timesteps)
+         total_steps = timesteps.shape[0]
+         print(f"Running DDIM Sampling with {total_steps} timesteps")
+
+         iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
+         x_dec = x_latent
+         for i, step in enumerate(iterator):
+             index = total_steps - i - 1
+             ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
+             x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
+                                           unconditional_guidance_scale=unconditional_guidance_scale,
+                                           unconditional_conditioning=unconditional_conditioning)
+         return x_dec
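
For orientation, a minimal sketch of how this sampler is typically driven (illustrative, not part of this commit; `model` is assumed to be a loaded `LatentDiffusion` checkpoint and `c` a conditioning batch produced by its cond stage):

```python
from ldm.models.diffusion.ddim import DDIMSampler

sampler = DDIMSampler(model)       # model: a loaded LatentDiffusion (assumed)
samples, intermediates = sampler.sample(
    S=50,                          # number of DDIM steps
    batch_size=4,
    shape=(4, 32, 32),             # latent (C, H, W); batch size is passed separately
    conditioning=c,                # c: conditioning tensor from the cond stage (assumed)
    eta=0.0,                       # eta=0 makes each DDIM update deterministic
    verbose=False,
)
# `samples` are latents; decode them with the first-stage autoencoder to get images.
```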
stable-diffusion/ldm/models/diffusion/ddpm.py ADDED
@@ -0,0 +1,1445 @@
+ """
+ wild mixture of
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
+ https://github.com/CompVis/taming-transformers
+ -- merci
+ """
+
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import pytorch_lightning as pl
+ from torch.optim.lr_scheduler import LambdaLR
+ from einops import rearrange, repeat
+ from contextlib import contextmanager
+ from functools import partial
+ from tqdm import tqdm
+ from torchvision.utils import make_grid
+ from pytorch_lightning.utilities.distributed import rank_zero_only
+
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
+ from ldm.modules.ema import LitEma
+ from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
+ from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
+ from ldm.models.diffusion.ddim import DDIMSampler
+
+
+ __conditioning_keys__ = {'concat': 'c_concat',
+                          'crossattn': 'c_crossattn',
+                          'adm': 'y'}
+
+
+ def disabled_train(self, mode=True):
+     """Overwrite model.train with this function to make sure train/eval mode
+     does not change anymore."""
+     return self
+
+
+ def uniform_on_device(r1, r2, shape, device):
+     return (r1 - r2) * torch.rand(*shape, device=device) + r2
+
+
+ class DDPM(pl.LightningModule):
+     # classic DDPM with Gaussian diffusion, in image space
+     def __init__(self,
+                  unet_config,
+                  timesteps=1000,
+                  beta_schedule="linear",
+                  loss_type="l2",
+                  ckpt_path=None,
+                  ignore_keys=[],
+                  load_only_unet=False,
+                  monitor="val/loss",
+                  use_ema=True,
+                  first_stage_key="image",
+                  image_size=256,
+                  channels=3,
+                  log_every_t=100,
+                  clip_denoised=True,
+                  linear_start=1e-4,
+                  linear_end=2e-2,
+                  cosine_s=8e-3,
+                  given_betas=None,
+                  original_elbo_weight=0.,
+                  v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
+                  l_simple_weight=1.,
+                  conditioning_key=None,
+                  parameterization="eps",  # all assuming fixed variance schedules
+                  scheduler_config=None,
+                  use_positional_encodings=False,
+                  learn_logvar=False,
+                  logvar_init=0.,
+                  ):
+         super().__init__()
+         assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
+         self.parameterization = parameterization
+         print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+         self.cond_stage_model = None
+         self.clip_denoised = clip_denoised
+         self.log_every_t = log_every_t
+         self.first_stage_key = first_stage_key
+         self.image_size = image_size  # try conv?
+         self.channels = channels
+         self.use_positional_encodings = use_positional_encodings
+         self.model = DiffusionWrapper(unet_config, conditioning_key)
+         count_params(self.model, verbose=True)
+         self.use_ema = use_ema
+         if self.use_ema:
+             self.model_ema = LitEma(self.model)
+             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+         self.use_scheduler = scheduler_config is not None
+         if self.use_scheduler:
+             self.scheduler_config = scheduler_config
+
+         self.v_posterior = v_posterior
+         self.original_elbo_weight = original_elbo_weight
+         self.l_simple_weight = l_simple_weight
+
+         if monitor is not None:
+             self.monitor = monitor
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+
+         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
+                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+
+         self.loss_type = loss_type
+
+         self.learn_logvar = learn_logvar
+         self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
+         if self.learn_logvar:
+             self.logvar = nn.Parameter(self.logvar, requires_grad=True)
+
+     def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
+                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+         if exists(given_betas):
+             betas = given_betas
+         else:
+             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
+                                        cosine_s=cosine_s)
+         alphas = 1. - betas
+         alphas_cumprod = np.cumprod(alphas, axis=0)
+         alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+         timesteps, = betas.shape
+         self.num_timesteps = int(timesteps)
+         self.linear_start = linear_start
+         self.linear_end = linear_end
+         assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
+
+         to_torch = partial(torch.tensor, dtype=torch.float32)
+
+         self.register_buffer('betas', to_torch(betas))
+         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+         self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+         # calculations for diffusion q(x_t | x_{t-1}) and others
+         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
+         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
+         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
+
+         # calculations for posterior q(x_{t-1} | x_t, x_0)
+         posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
+                 1. - alphas_cumprod) + self.v_posterior * betas
+         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
+         self.register_buffer('posterior_variance', to_torch(posterior_variance))
+         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
+         self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
+         self.register_buffer('posterior_mean_coef1', to_torch(
+             betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
+         self.register_buffer('posterior_mean_coef2', to_torch(
+             (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
+
+         if self.parameterization == "eps":
+             lvlb_weights = self.betas ** 2 / (
+                     2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
+         elif self.parameterization == "x0":
+             lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
+         else:
+             raise NotImplementedError("mu not supported")
+         # TODO how to choose this term
+         lvlb_weights[0] = lvlb_weights[1]
+         self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
+         assert not torch.isnan(self.lvlb_weights).all()
+
+     @contextmanager
+     def ema_scope(self, context=None):
+         if self.use_ema:
+             self.model_ema.store(self.model.parameters())
+             self.model_ema.copy_to(self.model)
+             if context is not None:
+                 print(f"{context}: Switched to EMA weights")
+         try:
+             yield None
+         finally:
+             if self.use_ema:
+                 self.model_ema.restore(self.model.parameters())
+                 if context is not None:
+                     print(f"{context}: Restored training weights")
+
+     def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+         sd = torch.load(path, map_location="cpu")
+         if "state_dict" in list(sd.keys()):
+             sd = sd["state_dict"]
+         keys = list(sd.keys())
+         for k in keys:
+             for ik in ignore_keys:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del sd[k]
+         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
+             sd, strict=False)
+         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+         if len(missing) > 0:
+             print(f"Missing Keys: {missing}")
+         if len(unexpected) > 0:
+             print(f"Unexpected Keys: {unexpected}")
+
+     def q_mean_variance(self, x_start, t):
+         """
+         Get the distribution q(x_t | x_0).
+         :param x_start: the [N x C x ...] tensor of noiseless inputs.
+         :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
+         :return: A tuple (mean, variance, log_variance), all of x_start's shape.
+         """
+         mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
+         variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
+         log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
+         return mean, variance, log_variance
+
+     def predict_start_from_noise(self, x_t, t, noise):
+         return (
+             extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
+             extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
+         )
+
+     def q_posterior(self, x_start, x_t, t):
+         posterior_mean = (
+             extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
+             extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
+         )
+         posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
+         posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
+         return posterior_mean, posterior_variance, posterior_log_variance_clipped
+
+     def p_mean_variance(self, x, t, clip_denoised: bool):
+         model_out = self.model(x, t)
+         if self.parameterization == "eps":
+             x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
+         elif self.parameterization == "x0":
+             x_recon = model_out
+         if clip_denoised:
+             x_recon.clamp_(-1., 1.)
+
+         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
+         return model_mean, posterior_variance, posterior_log_variance
+
+     # @torch.no_grad()
+     def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
+         b, *_, device = *x.shape, x.device
+         model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
+         noise = noise_like(x.shape, device, repeat_noise)
+         # no noise when t == 0
+         nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
+         return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
+
+     # @torch.no_grad()
+     def p_sample_loop(self, shape, return_intermediates=False):
+         device = self.betas.device
+         b = shape[0]
+         img = torch.randn(shape, device=device)
+         intermediates = [img]
+         for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
+             img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
+                                 clip_denoised=self.clip_denoised)
+             if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
+                 intermediates.append(img)
+         if return_intermediates:
+             return img, intermediates
+         return img
+
+     # @torch.no_grad()
+     def sample(self, batch_size=16, return_intermediates=False):
+         image_size = self.image_size
+         channels = self.channels
+         return self.p_sample_loop((batch_size, channels, image_size, image_size),
+                                   return_intermediates=return_intermediates)
+
+     def q_sample(self, x_start, t, noise=None):
+         noise = default(noise, lambda: torch.randn_like(x_start))
+         return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+                 extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
+
+     def get_loss(self, pred, target, mean=True):
+         if self.loss_type == 'l1':
+             loss = (target - pred).abs()
+             if mean:
+                 loss = loss.mean()
+         elif self.loss_type == 'l2':
+             if mean:
+                 loss = torch.nn.functional.mse_loss(target, pred)
+             else:
+                 loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
+         else:
+             raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
+
+         return loss
+
+     def p_losses(self, x_start, t, noise=None):
+         noise = default(noise, lambda: torch.randn_like(x_start))
+         x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+         model_out = self.model(x_noisy, t)
+
+         loss_dict = {}
+         if self.parameterization == "eps":
+             target = noise
+         elif self.parameterization == "x0":
+             target = x_start
+         else:
+             raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
+
+         loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
+
+         log_prefix = 'train' if self.training else 'val'
+
+         loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
+         loss_simple = loss.mean() * self.l_simple_weight
+
+         loss_vlb = (self.lvlb_weights[t] * loss).mean()
+         loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
+
+         loss = loss_simple + self.original_elbo_weight * loss_vlb
+
+         loss_dict.update({f'{log_prefix}/loss': loss})
+
+         return loss, loss_dict
+
+     def forward(self, x, *args, **kwargs):
+         # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
+         # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
+         t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+         return self.p_losses(x, t, *args, **kwargs)
+
+     def get_input(self, batch, k):
+         x = batch[k]
+         if len(x.shape) == 3:
+             x = x[..., None]
+         x = rearrange(x, 'b h w c -> b c h w')
+         x = x.to(memory_format=torch.contiguous_format).float()
+         return x
+
+     def shared_step(self, batch):
+         x = self.get_input(batch, self.first_stage_key)
+         loss, loss_dict = self(x)
+         return loss, loss_dict
+
+     def training_step(self, batch, batch_idx):
+         loss, loss_dict = self.shared_step(batch)
+
+         self.log_dict(loss_dict, prog_bar=True,
+                       logger=True, on_step=True, on_epoch=True)
+
+         self.log("global_step", self.global_step,
+                  prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+         if self.use_scheduler:
+             lr = self.optimizers().param_groups[0]['lr']
+             self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
+
+         return loss
+
+     # @torch.no_grad()
+     def validation_step(self, batch, batch_idx):
+         _, loss_dict_no_ema = self.shared_step(batch)
+         with self.ema_scope():
+             _, loss_dict_ema = self.shared_step(batch)
+             loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
+         self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+         self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
+
+     def on_train_batch_end(self, *args, **kwargs):
+         if self.use_ema:
+             self.model_ema(self.model)
+
+     def _get_rows_from_list(self, samples):
+         n_imgs_per_row = len(samples)
+         denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
+         denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
+         denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
+         return denoise_grid
+
+     # @torch.no_grad()
+     def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
+         log = dict()
+         x = self.get_input(batch, self.first_stage_key)
+         N = min(x.shape[0], N)
+         n_row = min(x.shape[0], n_row)
+         x = x.to(self.device)[:N]
+         log["inputs"] = x
+
+         # get diffusion row
+         diffusion_row = list()
+         x_start = x[:n_row]
+
+         for t in range(self.num_timesteps):
+             if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
+                 t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
+                 t = t.to(self.device).long()
+                 noise = torch.randn_like(x_start)
+                 x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+                 diffusion_row.append(x_noisy)
+
+         log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
+
+         if sample:
+             # get denoise row
+             with self.ema_scope("Plotting"):
+                 samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
+
+             log["samples"] = samples
+             log["denoise_row"] = self._get_rows_from_list(denoise_row)
+
+         if return_keys:
+             if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
+                 return log
+             else:
+                 return {key: log[key] for key in return_keys}
+         return log
+
+     def configure_optimizers(self):
+         lr = self.learning_rate
+         params = list(self.model.parameters())
+         if self.learn_logvar:
+             params = params + [self.logvar]
+         opt = torch.optim.AdamW(params, lr=lr)
+         return opt
+
+
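The closed-form forward process that `q_sample` above implements, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, can be checked in isolation; a minimal sketch (illustrative, not part of this commit; a plain linspace stands in for the repo's `make_beta_schedule`):

```python
import torch

# Forward diffusion marginal q(x_t | x_0), mirroring DDPM.q_sample above.
# T, linear_start, linear_end match the DDPM defaults; the schedule here is a
# plain linspace, a simplification of make_beta_schedule's "linear" variant.
T = 1000
betas = torch.linspace(1e-4, 2e-2, T)
alphas_cumprod = torch.cumprod(1. - betas, dim=0)

x0 = torch.randn(2, 3, 64, 64)      # stand-in for a batch of images
t = torch.tensor([10, 900])         # one timestep per batch element
noise = torch.randn_like(x0)
coef1 = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
coef2 = (1. - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
x_t = coef1 * x0 + coef2 * noise    # barely noised at t=10, nearly pure noise at t=900
```
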
424
+ class LatentDiffusion(DDPM):
425
+ """main class"""
426
+ def __init__(self,
427
+ first_stage_config,
428
+ cond_stage_config,
429
+ num_timesteps_cond=None,
430
+ cond_stage_key="image",
431
+ cond_stage_trainable=False,
432
+ concat_mode=True,
433
+ cond_stage_forward=None,
434
+ conditioning_key=None,
435
+ scale_factor=1.0,
436
+ scale_by_std=False,
437
+ *args, **kwargs):
438
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
439
+ self.scale_by_std = scale_by_std
440
+ assert self.num_timesteps_cond <= kwargs['timesteps']
441
+ # for backwards compatibility after implementation of DiffusionWrapper
442
+ if conditioning_key is None:
443
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
444
+ if cond_stage_config == '__is_unconditional__':
445
+ conditioning_key = None
446
+ ckpt_path = kwargs.pop("ckpt_path", None)
447
+ ignore_keys = kwargs.pop("ignore_keys", [])
448
+ super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
449
+ self.concat_mode = concat_mode
450
+ self.cond_stage_trainable = cond_stage_trainable
451
+ self.cond_stage_key = cond_stage_key
452
+ try:
453
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
454
+ except Exception:
455
+ self.num_downs = 0
456
+ if not scale_by_std:
457
+ self.scale_factor = scale_factor
458
+ else:
459
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
460
+ self.instantiate_first_stage(first_stage_config)
461
+ self.instantiate_cond_stage(cond_stage_config)
462
+ self.cond_stage_forward = cond_stage_forward
463
+ self.clip_denoised = False
464
+ self.bbox_tokenizer = None
465
+
466
+ self.restarted_from_ckpt = False
467
+ if ckpt_path is not None:
468
+ self.init_from_ckpt(ckpt_path, ignore_keys)
469
+ self.restarted_from_ckpt = True
470
+
471
+ def make_cond_schedule(self):
472
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
473
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
474
+ self.cond_ids[:self.num_timesteps_cond] = ids
475
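+ # Illustrative sketch of the resulting schedule (assuming num_timesteps=1000
+ # and num_timesteps_cond=2; values shown only for intuition):
+ # cond_ids = [0, 999, 999, ..., 999]
+ # i.e. every timestep maps to the final conditioning step except the first
+ # num_timesteps_cond entries, which are spread evenly over [0, num_timesteps - 1].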
+
476
+ @rank_zero_only
477
+ # @torch.no_grad()
478
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
479
+ # only for very first batch
480
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
481
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
482
+ # set rescale weight to 1./std of encodings
483
+ print("### USING STD-RESCALING ###")
484
+ x = super().get_input(batch, self.first_stage_key)
485
+ x = x.to(self.device)
486
+ encoder_posterior = self.encode_first_stage(x)
487
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
488
+ del self.scale_factor
489
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
490
+ print(f"setting self.scale_factor to {self.scale_factor}")
491
+ print("### USING STD-RESCALING ###")
492
+
493
+ def register_schedule(self,
494
+ given_betas=None, beta_schedule="linear", timesteps=1000,
495
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
496
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
497
+
498
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
499
+ if self.shorten_cond_schedule:
500
+ self.make_cond_schedule()
501
+
502
+ def instantiate_first_stage(self, config):
503
+ model = instantiate_from_config(config)
504
+ self.first_stage_model = model.eval()
505
+ self.first_stage_model.train = disabled_train
506
+ for param in self.first_stage_model.parameters():
507
+ param.requires_grad = False
508
+
509
+ def instantiate_cond_stage(self, config):
510
+ if not self.cond_stage_trainable:
511
+ if config == "__is_first_stage__":
512
+ print("Using first stage also as cond stage.")
513
+ self.cond_stage_model = self.first_stage_model
514
+ elif config == "__is_unconditional__":
515
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
516
+ self.cond_stage_model = None
517
+ # self.be_unconditional = True
518
+ else:
519
+ model = instantiate_from_config(config)
520
+ self.cond_stage_model = model.eval()
521
+ self.cond_stage_model.train = disabled_train
522
+ for param in self.cond_stage_model.parameters():
523
+ param.requires_grad = False
524
+ else:
525
+ assert config != '__is_first_stage__'
526
+ assert config != '__is_unconditional__'
527
+ model = instantiate_from_config(config)
528
+ self.cond_stage_model = model
529
+
530
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
531
+ denoise_row = []
532
+ for zd in tqdm(samples, desc=desc):
533
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
534
+ force_not_quantize=force_no_decoder_quantization))
535
+ n_imgs_per_row = len(denoise_row)
536
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
537
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
538
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
539
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
540
+ return denoise_grid
541
+
542
+ def get_first_stage_encoding(self, encoder_posterior):
543
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
544
+ z = encoder_posterior.sample()
545
+ elif isinstance(encoder_posterior, torch.Tensor):
546
+ z = encoder_posterior
547
+ else:
548
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
549
+ return self.scale_factor * z
550
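+ # Minimal usage sketch (assumes `model` is a constructed LatentDiffusion and
+ # `x` is an image batch of shape (b, c, h, w); names are illustrative):
+ # >>> posterior = model.encode_first_stage(x)
+ # >>> z = model.get_first_stage_encoding(posterior).detach()
+ # `z` is the scaled latent the diffusion model operates on; dividing by
+ # `model.scale_factor` (as decode_first_stage does) undoes the scaling.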
+
551
+ def get_learned_conditioning(self, c):
552
+ if self.cond_stage_forward is None:
553
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
554
+ c = self.cond_stage_model.encode(c)
555
+ if isinstance(c, DiagonalGaussianDistribution):
556
+ c = c.mode()
557
+ else:
558
+ c = self.cond_stage_model(c)
559
+ else:
560
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
561
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
562
+ return c
563
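+ # Usage sketch (assumes the cond stage is a text encoder exposing `encode`;
+ # the prompt is purely illustrative):
+ # >>> c = model.get_learned_conditioning(["a photograph of an astronaut"])
+ # The returned `c` is what gets passed as conditioning to apply_model/sample.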
+
564
+ def meshgrid(self, h, w):
565
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
566
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
567
+
568
+ arr = torch.cat([y, x], dim=-1)
569
+ return arr
570
+
571
+ def delta_border(self, h, w):
572
+ """
573
+ :param h: height
574
+ :param w: width
575
+ :return: normalized distance to image border,
576
+ with min distance = 0 at the border and max dist = 0.5 at the image center
577
+ """
578
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
579
+ arr = self.meshgrid(h, w) / lower_right_corner
580
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
581
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
582
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
583
+ return edge_dist
584
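+ # Small numeric sketch: for a 3x3 grid the border cells get distance 0 and
+ # the center cell 0.5, so delta_border(3, 3) is
+ # [[0.0, 0.0, 0.0],
+ #  [0.0, 0.5, 0.0],
+ #  [0.0, 0.0, 0.0]]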
+
585
+ def get_weighting(self, h, w, Ly, Lx, device):
586
+ weighting = self.delta_border(h, w)
587
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
588
+ self.split_input_params["clip_max_weight"], )
589
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
590
+
591
+ if self.split_input_params["tie_braker"]:
592
+ L_weighting = self.delta_border(Ly, Lx)
593
+ L_weighting = torch.clip(L_weighting,
594
+ self.split_input_params["clip_min_tie_weight"],
595
+ self.split_input_params["clip_max_tie_weight"])
596
+
597
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
598
+ weighting = weighting * L_weighting
599
+ return weighting
600
+
601
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
602
+ """
603
+ :param x: img of size (bs, c, h, w)
604
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
605
+ """
606
+ bs, nc, h, w = x.shape
607
+
608
+ # number of crops in image
609
+ Ly = (h - kernel_size[0]) // stride[0] + 1
610
+ Lx = (w - kernel_size[1]) // stride[1] + 1
611
+
612
+ if uf == 1 and df == 1:
613
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
614
+ unfold = torch.nn.Unfold(**fold_params)
615
+
616
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
617
+
618
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
619
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
620
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
621
+
622
+ elif uf > 1 and df == 1:
623
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
624
+ unfold = torch.nn.Unfold(**fold_params)
625
+
626
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
627
+ dilation=1, padding=0,
628
+ stride=(stride[0] * uf, stride[1] * uf))
629
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
630
+
631
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
632
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
633
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
634
+
635
+ elif df > 1 and uf == 1:
636
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
637
+ unfold = torch.nn.Unfold(**fold_params)
638
+
639
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
640
+ dilation=1, padding=0,
641
+ stride=(stride[0] // df, stride[1] // df))
642
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
643
+
644
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
645
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
646
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
647
+
648
+ else:
649
+ raise NotImplementedError
650
+
651
+ return fold, unfold, normalization, weighting
652
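+ # Behavior sketch for the uf == df == 1 branch (toy sizes for illustration):
+ # for x of shape (1, 3, 128, 128) with kernel_size=(64, 64), stride=(32, 32),
+ # Ly = Lx = (128 - 64) // 32 + 1 = 3, so unfold(x) has shape (1, 3*64*64, 9).
+ # fold(weighting) accumulates the per-crop weights back onto the image grid;
+ # dividing the stitched result by `normalization` makes overlapping crops
+ # average instead of accumulate.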
+
653
+ # @torch.no_grad()
654
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
655
+ cond_key=None, return_original_cond=False, bs=None):
656
+ x = super().get_input(batch, k)
657
+ if bs is not None:
658
+ x = x[:bs]
659
+ x = x.to(self.device)
660
+ encoder_posterior = self.encode_first_stage(x)
661
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
662
+
663
+ if self.model.conditioning_key is not None:
664
+ if cond_key is None:
665
+ cond_key = self.cond_stage_key
666
+ if cond_key != self.first_stage_key:
667
+ if cond_key in ['caption', 'coordinates_bbox']:
668
+ xc = batch[cond_key]
669
+ elif cond_key == 'class_label':
670
+ xc = batch
671
+ else:
672
+ xc = super().get_input(batch, cond_key).to(self.device)
673
+ else:
674
+ xc = x
675
+ if not self.cond_stage_trainable or force_c_encode:
676
+ if isinstance(xc, dict) or isinstance(xc, list):
677
+ # import pudb; pudb.set_trace()
678
+ c = self.get_learned_conditioning(xc)
679
+ else:
680
+ c = self.get_learned_conditioning(xc.to(self.device))
681
+ else:
682
+ c = xc
683
+ if bs is not None:
684
+ c = c[:bs]
685
+
686
+ if self.use_positional_encodings:
687
+ pos_x, pos_y = self.compute_latent_shifts(batch)
688
+ ckey = __conditioning_keys__[self.model.conditioning_key]
689
+ c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
690
+
691
+ else:
692
+ c = None
693
+ xc = None
694
+ if self.use_positional_encodings:
695
+ pos_x, pos_y = self.compute_latent_shifts(batch)
696
+ c = {'pos_x': pos_x, 'pos_y': pos_y}
697
+ out = [z, c]
698
+ if return_first_stage_outputs:
699
+ xrec = self.decode_first_stage(z)
700
+ out.extend([x, xrec])
701
+ if return_original_cond:
702
+ out.append(xc)
703
+ return out
704
+
705
+ # @torch.no_grad()
706
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
707
+ if predict_cids:
708
+ if z.dim() == 4:
709
+ z = torch.argmax(z.exp(), dim=1).long()
710
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
711
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
712
+
713
+ z = 1. / self.scale_factor * z
714
+
715
+ if hasattr(self, "split_input_params"):
716
+ if self.split_input_params["patch_distributed_vq"]:
717
+ ks = self.split_input_params["ks"]  # e.g. (128, 128)
718
+ stride = self.split_input_params["stride"]  # e.g. (64, 64)
719
+ uf = self.split_input_params["vqf"]
720
+ bs, nc, h, w = z.shape
721
+ if ks[0] > h or ks[1] > w:
722
+ ks = (min(ks[0], h), min(ks[1], w))
723
+ print("reducing Kernel")
724
+
725
+ if stride[0] > h or stride[1] > w:
726
+ stride = (min(stride[0], h), min(stride[1], w))
727
+ print("reducing stride")
728
+
729
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
730
+
731
+ z = unfold(z) # (bn, nc * prod(**ks), L)
732
+ # 1. Reshape to img shape
733
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
734
+
735
+ # 2. apply model loop over last dim
736
+ if isinstance(self.first_stage_model, VQModelInterface):
737
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
738
+ force_not_quantize=predict_cids or force_not_quantize)
739
+ for i in range(z.shape[-1])]
740
+ else:
741
+
742
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
743
+ for i in range(z.shape[-1])]
744
+
745
+ o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
746
+ o = o * weighting
747
+ # Reverse 1. reshape to img shape
748
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
749
+ # stitch crops together
750
+ decoded = fold(o)
751
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
752
+ return decoded
753
+ else:
754
+ if isinstance(self.first_stage_model, VQModelInterface):
755
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
756
+ else:
757
+ return self.first_stage_model.decode(z)
758
+
759
+ else:
760
+ if isinstance(self.first_stage_model, VQModelInterface):
761
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
762
+ else:
763
+ return self.first_stage_model.decode(z)
764
+
765
+ # same as above but without decorator
766
+ def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
767
+ if predict_cids:
768
+ if z.dim() == 4:
769
+ z = torch.argmax(z.exp(), dim=1).long()
770
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
771
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
772
+
773
+ z = 1. / self.scale_factor * z
774
+
775
+ if hasattr(self, "split_input_params"):
776
+ if self.split_input_params["patch_distributed_vq"]:
777
+ ks = self.split_input_params["ks"]  # e.g. (128, 128)
778
+ stride = self.split_input_params["stride"]  # e.g. (64, 64)
779
+ uf = self.split_input_params["vqf"]
780
+ bs, nc, h, w = z.shape
781
+ if ks[0] > h or ks[1] > w:
782
+ ks = (min(ks[0], h), min(ks[1], w))
783
+ print("reducing Kernel")
784
+
785
+ if stride[0] > h or stride[1] > w:
786
+ stride = (min(stride[0], h), min(stride[1], w))
787
+ print("reducing stride")
788
+
789
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
790
+
791
+ z = unfold(z) # (bn, nc * prod(**ks), L)
792
+ # 1. Reshape to img shape
793
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
794
+
795
+ # 2. apply model loop over last dim
796
+ if isinstance(self.first_stage_model, VQModelInterface):
797
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
798
+ force_not_quantize=predict_cids or force_not_quantize)
799
+ for i in range(z.shape[-1])]
800
+ else:
801
+
802
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
803
+ for i in range(z.shape[-1])]
804
+
805
+ o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
806
+ o = o * weighting
807
+ # Reverse 1. reshape to img shape
808
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
809
+ # stitch crops together
810
+ decoded = fold(o)
811
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
812
+ return decoded
813
+ else:
814
+ if isinstance(self.first_stage_model, VQModelInterface):
815
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
816
+ else:
817
+ return self.first_stage_model.decode(z)
818
+
819
+ else:
820
+ if isinstance(self.first_stage_model, VQModelInterface):
821
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
822
+ else:
823
+ return self.first_stage_model.decode(z)
824
+
825
+ # @torch.no_grad()
826
+ def encode_first_stage(self, x):
827
+ if hasattr(self, "split_input_params"):
828
+ if self.split_input_params["patch_distributed_vq"]:
829
+ ks = self.split_input_params["ks"]  # e.g. (128, 128)
830
+ stride = self.split_input_params["stride"]  # e.g. (64, 64)
831
+ df = self.split_input_params["vqf"]
832
+ self.split_input_params['original_image_size'] = x.shape[-2:]
833
+ bs, nc, h, w = x.shape
834
+ if ks[0] > h or ks[1] > w:
835
+ ks = (min(ks[0], h), min(ks[1], w))
836
+ print("reducing Kernel")
837
+
838
+ if stride[0] > h or stride[1] > w:
839
+ stride = (min(stride[0], h), min(stride[1], w))
840
+ print("reducing stride")
841
+
842
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
843
+ z = unfold(x) # (bn, nc * prod(**ks), L)
844
+ # Reshape to img shape
845
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
846
+
847
+ output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
848
+ for i in range(z.shape[-1])]
849
+
850
+ o = torch.stack(output_list, axis=-1)
851
+ o = o * weighting
852
+
853
+ # Reverse reshape to img shape
854
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
855
+ # stitch crops together
856
+ decoded = fold(o)
857
+ decoded = decoded / normalization
858
+ return decoded
859
+
860
+ else:
861
+ return self.first_stage_model.encode(x)
862
+ else:
863
+ return self.first_stage_model.encode(x)
864
+
865
+ def shared_step(self, batch, **kwargs):
866
+ x, c = self.get_input(batch, self.first_stage_key)
867
+ loss = self(x, c)
868
+ return loss
869
+
870
+ def forward(self, x, c, *args, **kwargs):
871
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
872
+ if self.model.conditioning_key is not None:
873
+ assert c is not None
874
+ if self.cond_stage_trainable:
875
+ c = self.get_learned_conditioning(c)
876
+ if self.shorten_cond_schedule: # TODO: drop this option
877
+ tc = self.cond_ids[t].to(self.device)
878
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
879
+ return self.p_losses(x, c, t, *args, **kwargs)
880
+
881
+ def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
882
+ def rescale_bbox(bbox):
883
+ x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
884
+ y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
885
+ w = min(bbox[2] / crop_coordinates[2], 1 - x0)
886
+ h = min(bbox[3] / crop_coordinates[3], 1 - y0)
887
+ return x0, y0, w, h
888
+
889
+ return [rescale_bbox(b) for b in bboxes]
890
+
891
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
892
+
893
+ if isinstance(cond, dict):
894
+ # hybrid case: cond is expected to be a dict
895
+ pass
896
+ else:
897
+ if not isinstance(cond, list):
898
+ cond = [cond]
899
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
900
+ cond = {key: cond}
901
+
902
+ if hasattr(self, "split_input_params"):
903
+ assert len(cond) == 1  # TODO: can only deal with one conditioning at the moment
904
+ assert not return_ids
905
+ ks = self.split_input_params["ks"]  # e.g. (128, 128)
906
+ stride = self.split_input_params["stride"]  # e.g. (64, 64)
907
+
908
+ h, w = x_noisy.shape[-2:]
909
+
910
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
911
+
912
+ z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
913
+ # Reshape to img shape
914
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
915
+ z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
916
+
917
+ if self.cond_stage_key in ["image", "LR_image", "segmentation",
918
+ 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
919
+ c_key = next(iter(cond.keys())) # get key
920
+ c = next(iter(cond.values())) # get value
921
+ assert (len(c) == 1) # todo extend to list with more than one elem
922
+ c = c[0] # get element
923
+
924
+ c = unfold(c)
925
+ c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
926
+
927
+ cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
928
+
929
+ elif self.cond_stage_key == 'coordinates_bbox':
930
+ assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
931
+
932
+ # assuming padding of unfold is always 0 and its dilation is always 1
933
+ n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
934
+ full_img_h, full_img_w = self.split_input_params['original_image_size']
935
+ # as we are operating on latents, we need the factor from the original image size to the
936
+ # spatial latent size to properly rescale the crops for regenerating the bbox annotations
937
+ num_downs = self.first_stage_model.encoder.num_resolutions - 1
938
+ rescale_latent = 2 ** (num_downs)
939
+
940
+ # get top-left positions of the patches as expected by the bbox tokenizer; therefore we
941
+ # need to rescale the top-left patch coordinates to lie in (0, 1)
942
+ tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
943
+ rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
944
+ for patch_nr in range(z.shape[-1])]
945
+
946
+ # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
947
+ patch_limits = [(x_tl, y_tl,
948
+ rescale_latent * ks[0] / full_img_w,
949
+ rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
950
+ # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
951
+
952
+ # tokenize crop coordinates for the bounding boxes of the respective patches
953
+ patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
954
+ for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
955
+ print(patch_limits_tknzd[0].shape)
956
+ # cut tknzd crop position from conditioning
957
+ assert isinstance(cond, dict), 'cond must be dict to be fed into model'
958
+ cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
959
+ print(cut_cond.shape)
960
+
961
+ adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
962
+ adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
963
+ print(adapted_cond.shape)
964
+ adapted_cond = self.get_learned_conditioning(adapted_cond)
965
+ print(adapted_cond.shape)
966
+ adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
967
+ print(adapted_cond.shape)
968
+
969
+ cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
970
+
971
+ else:
972
+ cond_list = [cond for i in range(z.shape[-1])]  # TODO: make this more efficient
973
+
974
+ # apply model by loop over crops
975
+ output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
976
+ assert not isinstance(output_list[0],
977
+ tuple)  # TODO: can't deal with multiple model outputs; check this never happens
978
+
979
+ o = torch.stack(output_list, axis=-1)
980
+ o = o * weighting
981
+ # Reverse reshape to img shape
982
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
983
+ # stitch crops together
984
+ x_recon = fold(o) / normalization
985
+
986
+ else:
987
+ x_recon = self.model(x_noisy, t, **cond)
988
+
989
+ if isinstance(x_recon, tuple) and not return_ids:
990
+ return x_recon[0]
991
+ else:
992
+ return x_recon
993
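+ # Call sketch for the non-split path (names illustrative; assumes a
+ # cross-attention conditioned model and a noisy latent batch `z_noisy`):
+ # >>> t = torch.randint(0, model.num_timesteps, (z_noisy.shape[0],), device=z_noisy.device)
+ # >>> eps = model.apply_model(z_noisy, t, {'c_crossattn': [c]})
+ # A bare tensor or list `c` would be wrapped into the same dict automatically.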
+
994
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
995
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
996
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
997
+
998
+ def _prior_bpd(self, x_start):
999
+ """
1000
+ Get the prior KL term for the variational lower-bound, measured in
1001
+ bits-per-dim.
1002
+ This term can't be optimized, as it only depends on the encoder.
1003
+ :param x_start: the [N x C x ...] tensor of inputs.
1004
+ :return: a batch of [N] KL values (in bits), one per batch element.
1005
+ """
1006
+ batch_size = x_start.shape[0]
1007
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
1008
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
1009
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
1010
+ return mean_flat(kl_prior) / np.log(2.0)
1011
+
1012
+ def p_losses(self, x_start, cond, t, noise=None):
1013
+ noise = default(noise, lambda: torch.randn_like(x_start))
1014
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
1015
+ model_output = self.apply_model(x_noisy, t, cond)
1016
+
1017
+ loss_dict = {}
1018
+ prefix = 'train' if self.training else 'val'
1019
+
1020
+ if self.parameterization == "x0":
1021
+ target = x_start
1022
+ elif self.parameterization == "eps":
1023
+ target = noise
1024
+ else:
1025
+ raise NotImplementedError()
1026
+
1027
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
1028
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
1029
+
1030
+ logvar_t = self.logvar[t].to(self.device)
1031
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
1032
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
1033
+ if self.learn_logvar:
1034
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
1035
+ loss_dict.update({'logvar': self.logvar.data.mean()})
1036
+
1037
+ loss = self.l_simple_weight * loss.mean()
1038
+
1039
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
1040
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
1041
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
1042
+ loss += (self.original_elbo_weight * loss_vlb)
1043
+ loss_dict.update({f'{prefix}/loss': loss})
1044
+
1045
+ return loss, loss_dict
1046
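+ # Note on the objective above: loss_simple / exp(logvar_t) + logvar_t is a
+ # per-timestep uncertainty weighting (learnable when learn_logvar is set),
+ # and the VLB term enters scaled by original_elbo_weight; when logvar is zero
+ # and original_elbo_weight is zero this reduces to l_simple_weight times the
+ # mean simple loss.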
+
1047
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
1048
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
1049
+ t_in = t
1050
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
1051
+
1052
+ if score_corrector is not None:
1053
+ assert self.parameterization == "eps"
1054
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
1055
+
1056
+ if return_codebook_ids:
1057
+ model_out, logits = model_out
1058
+
1059
+ if self.parameterization == "eps":
1060
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
1061
+ elif self.parameterization == "x0":
1062
+ x_recon = model_out
1063
+ else:
1064
+ raise NotImplementedError()
1065
+
1066
+ if clip_denoised:
1067
+ x_recon.clamp_(-1., 1.)
1068
+ if quantize_denoised:
1069
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
1070
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
1071
+ if return_codebook_ids:
1072
+ return model_mean, posterior_variance, posterior_log_variance, logits
1073
+ elif return_x0:
1074
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
1075
+ else:
1076
+ return model_mean, posterior_variance, posterior_log_variance
1077
+
1078
+ # @torch.no_grad()
1079
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
1080
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
1081
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
1082
+ b, *_, device = *x.shape, x.device
1083
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
1084
+ return_codebook_ids=return_codebook_ids,
1085
+ quantize_denoised=quantize_denoised,
1086
+ return_x0=return_x0,
1087
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1088
+ if return_codebook_ids:
1089
+ raise DeprecationWarning("Support dropped.")
1090
+ model_mean, _, model_log_variance, logits = outputs
1091
+ elif return_x0:
1092
+ model_mean, _, model_log_variance, x0 = outputs
1093
+ else:
1094
+ model_mean, _, model_log_variance = outputs
1095
+
1096
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
1097
+ if noise_dropout > 0.:
1098
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
1099
+ # no noise when t == 0
1100
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
1101
+
1102
+ if return_codebook_ids:
1103
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
1104
+ if return_x0:
1105
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
1106
+ else:
1107
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1108
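+ # Ancestral update used above: x_{t-1} = mu_theta(x_t, t) + m * exp(0.5 * logvar) * z,
+ # where z ~ N(0, I) (scaled by `temperature`) and the mask m is 0 only at
+ # t == 0, so the final step returns the posterior mean without added noise.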
+
1109
+ # @torch.no_grad()
1110
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
1111
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
1112
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
1113
+ log_every_t=None):
1114
+ if not log_every_t:
1115
+ log_every_t = self.log_every_t
1116
+ timesteps = self.num_timesteps
1117
+ if batch_size is not None:
1118
+ b = batch_size
1119
+ shape = [batch_size] + list(shape)
1120
+ else:
1121
+ b = batch_size = shape[0]
1122
+ if x_T is None:
1123
+ img = torch.randn(shape, device=self.device)
1124
+ else:
1125
+ img = x_T
1126
+ intermediates = []
1127
+ if cond is not None:
1128
+ if isinstance(cond, dict):
1129
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1130
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1131
+ else:
1132
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1133
+
1134
+ if start_T is not None:
1135
+ timesteps = min(timesteps, start_T)
1136
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1137
+ total=timesteps) if verbose else reversed(
1138
+ range(0, timesteps))
1139
+ if isinstance(temperature, float):
1140
+ temperature = [temperature] * timesteps
1141
+
1142
+ for i in iterator:
1143
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1144
+ if self.shorten_cond_schedule:
1145
+ assert self.model.conditioning_key != 'hybrid'
1146
+ tc = self.cond_ids[ts].to(cond.device)
1147
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1148
+
1149
+ img, x0_partial = self.p_sample(img, cond, ts,
1150
+ clip_denoised=self.clip_denoised,
1151
+ quantize_denoised=quantize_denoised, return_x0=True,
1152
+ temperature=temperature[i], noise_dropout=noise_dropout,
1153
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1154
+ if mask is not None:
1155
+ assert x0 is not None
1156
+ img_orig = self.q_sample(x0, ts)
1157
+ img = img_orig * mask + (1. - mask) * img
1158
+
1159
+ if i % log_every_t == 0 or i == timesteps - 1:
1160
+ intermediates.append(x0_partial)
1161
+ if callback: callback(i)
1162
+ if img_callback: img_callback(img, i)
1163
+ return img, intermediates
1164
+
1165
+ # @torch.no_grad()
1166
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
1167
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1168
+ mask=None, x0=None, img_callback=None, start_T=None,
1169
+ log_every_t=None):
1170
+
1171
+ if not log_every_t:
1172
+ log_every_t = self.log_every_t
1173
+ device = self.betas.device
1174
+ b = shape[0]
1175
+ if x_T is None:
1176
+ img = torch.randn(shape, device=device)
1177
+ else:
1178
+ img = x_T
1179
+
1180
+ intermediates = [img]
1181
+ if timesteps is None:
1182
+ timesteps = self.num_timesteps
1183
+
1184
+ if start_T is not None:
1185
+ timesteps = min(timesteps, start_T)
1186
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1187
+ range(0, timesteps))
1188
+
1189
+ if mask is not None:
1190
+ assert x0 is not None
1191
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1192
+
1193
+ for i in iterator:
1194
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
1195
+ if self.shorten_cond_schedule:
1196
+ assert self.model.conditioning_key != 'hybrid'
1197
+ tc = self.cond_ids[ts].to(cond.device)
1198
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1199
+
1200
+ img = self.p_sample(img, cond, ts,
1201
+ clip_denoised=self.clip_denoised,
1202
+ quantize_denoised=quantize_denoised)
1203
+ if mask is not None:
1204
+ img_orig = self.q_sample(x0, ts)
1205
+ img = img_orig * mask + (1. - mask) * img
1206
+
1207
+ if i % log_every_t == 0 or i == timesteps - 1:
1208
+ intermediates.append(img)
1209
+ if callback: callback(i)
1210
+ if img_callback: img_callback(img, i)
1211
+
1212
+ if return_intermediates:
1213
+ return img, intermediates
1214
+ return img
1215
+
1216
+ # @torch.no_grad()
1217
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1218
+ verbose=True, timesteps=None, quantize_denoised=False,
1219
+ mask=None, x0=None, shape=None, **kwargs):
1220
+ if shape is None:
1221
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
1222
+ if cond is not None:
1223
+ if isinstance(cond, dict):
1224
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1225
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1226
+ else:
1227
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1228
+ return self.p_sample_loop(cond,
1229
+ shape,
1230
+ return_intermediates=return_intermediates, x_T=x_T,
1231
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1232
+ mask=mask, x0=x0)
1233
+
1234
+ # @torch.no_grad()
1235
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1236
+
1237
+ if ddim:
1238
+ ddim_sampler = DDIMSampler(self)
1239
+ shape = (self.channels, self.image_size, self.image_size)
1240
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1241
+ shape, cond, verbose=False, **kwargs)
1242
+
1243
+ else:
1244
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1245
+ return_intermediates=True, **kwargs)
1246
+
1247
+ return samples, intermediates
1248
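+ # Usage sketch (assumes `c` came from get_learned_conditioning; batch size
+ # and step count are illustrative):
+ # >>> samples, intermediates = model.sample_log(cond=c, batch_size=4,
+ # ...                                           ddim=True, ddim_steps=50, eta=0.0)
+ # >>> imgs = model.decode_first_stage(samples)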
+
1249
+
1250
+ # @torch.no_grad()
1251
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1252
+ quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1253
+ plot_diffusion_rows=True, **kwargs):
1254
+
1255
+ use_ddim = ddim_steps is not None
1256
+
1257
+ log = dict()
1258
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1259
+ return_first_stage_outputs=True,
1260
+ force_c_encode=True,
1261
+ return_original_cond=True,
1262
+ bs=N)
1263
+ N = min(x.shape[0], N)
1264
+ n_row = min(x.shape[0], n_row)
1265
+ log["inputs"] = x
1266
+ log["reconstruction"] = xrec
1267
+ if self.model.conditioning_key is not None:
1268
+ if hasattr(self.cond_stage_model, "decode"):
1269
+ xc = self.cond_stage_model.decode(c)
1270
+ log["conditioning"] = xc
1271
+ elif self.cond_stage_key in ["caption"]:
1272
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1273
+ log["conditioning"] = xc
1274
+ elif self.cond_stage_key == 'class_label':
1275
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1276
+ log['conditioning'] = xc
1277
+ elif isimage(xc):
1278
+ log["conditioning"] = xc
1279
+ if ismap(xc):
1280
+ log["original_conditioning"] = self.to_rgb(xc)
1281
+
1282
+ if plot_diffusion_rows:
1283
+ # get diffusion row
1284
+ diffusion_row = list()
1285
+ z_start = z[:n_row]
1286
+ for t in range(self.num_timesteps):
1287
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1288
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1289
+ t = t.to(self.device).long()
1290
+ noise = torch.randn_like(z_start)
1291
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1292
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1293
+
1294
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1295
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1296
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1297
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1298
+ log["diffusion_row"] = diffusion_grid
1299
+
1300
+ if sample:
1301
+ # get denoise row
1302
+ with self.ema_scope("Plotting"):
1303
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1304
+ ddim_steps=ddim_steps, eta=ddim_eta)
1305
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1306
+ x_samples = self.decode_first_stage(samples)
1307
+ log["samples"] = x_samples
1308
+ if plot_denoise_rows:
1309
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1310
+ log["denoise_row"] = denoise_grid
1311
+
1312
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1313
+ self.first_stage_model, IdentityFirstStage):
1314
+ # also display when quantizing x0 while sampling
1315
+ with self.ema_scope("Plotting Quantized Denoised"):
1316
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1317
+ ddim_steps=ddim_steps, eta=ddim_eta,
1318
+ quantize_denoised=True)
1319
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1320
+ # quantize_denoised=True)
1321
+ x_samples = self.decode_first_stage(samples.to(self.device))
1322
+ log["samples_x0_quantized"] = x_samples
1323
+
1324
+ if inpaint:
1325
+ # make a simple center square
1326
+ b, h, w = z.shape[0], z.shape[2], z.shape[3]
1327
+ mask = torch.ones(N, h, w).to(self.device)
1328
+ # zeros will be filled in
1329
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1330
+ mask = mask[:, None, ...]
1331
+ with self.ema_scope("Plotting Inpaint"):
1332
+
1333
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1334
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1335
+ x_samples = self.decode_first_stage(samples.to(self.device))
1336
+ log["samples_inpainting"] = x_samples
1337
+ log["mask"] = mask
1338
+
1339
+ # outpaint
1340
+ with self.ema_scope("Plotting Outpaint"):
1341
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1342
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1343
+ x_samples = self.decode_first_stage(samples.to(self.device))
1344
+ log["samples_outpainting"] = x_samples
1345
+
1346
+ if plot_progressive_rows:
1347
+ with self.ema_scope("Plotting Progressives"):
1348
+ img, progressives = self.progressive_denoising(c,
1349
+ shape=(self.channels, self.image_size, self.image_size),
1350
+ batch_size=N)
1351
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1352
+ log["progressive_row"] = prog_row
1353
+
1354
+ if return_keys:
1355
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1356
+ return log
1357
+ else:
1358
+ return {key: log[key] for key in return_keys}
1359
+ return log
1360
+
1361
+ def configure_optimizers(self):
1362
+ lr = self.learning_rate
1363
+ params = list(self.model.parameters())
1364
+ if self.cond_stage_trainable:
1365
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1366
+ params = params + list(self.cond_stage_model.parameters())
1367
+ if self.learn_logvar:
1368
+ print('Diffusion model optimizing logvar')
1369
+ params.append(self.logvar)
1370
+ opt = torch.optim.AdamW(params, lr=lr)
1371
+ if self.use_scheduler:
1372
+ assert 'target' in self.scheduler_config
1373
+ scheduler = instantiate_from_config(self.scheduler_config)
1374
+
1375
+ print("Setting up LambdaLR scheduler...")
1376
+ scheduler = [
1377
+ {
1378
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1379
+ 'interval': 'step',
1380
+ 'frequency': 1
1381
+ }]
1382
+ return [opt], scheduler
1383
+ return opt
1384
+
1385
+ # @torch.no_grad()
1386
+ def to_rgb(self, x):
1387
+ x = x.float()
1388
+ if not hasattr(self, "colorize"):
1389
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1390
+ x = nn.functional.conv2d(x, weight=self.colorize)
1391
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1392
+ return x
1393
+
1394
+
1395
+ class DiffusionWrapper(pl.LightningModule):
1396
+ def __init__(self, diff_model_config, conditioning_key):
1397
+ super().__init__()
1398
+ self.diffusion_model = instantiate_from_config(diff_model_config)
1399
+ self.conditioning_key = conditioning_key
1400
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
1401
+
1402
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
1403
+ if self.conditioning_key is None:
1404
+ out = self.diffusion_model(x, t)
1405
+ elif self.conditioning_key == 'concat':
1406
+ xc = torch.cat([x] + c_concat, dim=1)
1407
+ out = self.diffusion_model(xc, t)
1408
+ elif self.conditioning_key == 'crossattn':
1409
+ cc = torch.cat(c_crossattn, 1)
1410
+ out = self.diffusion_model(x, t, context=cc)
1411
+ elif self.conditioning_key == 'hybrid':
1412
+ xc = torch.cat([x] + c_concat, dim=1)
1413
+ cc = torch.cat(c_crossattn, 1)
1414
+ out = self.diffusion_model(xc, t, context=cc)
1415
+ elif self.conditioning_key == 'adm':
1416
+ cc = c_crossattn[0]
1417
+ out = self.diffusion_model(x, t, y=cc)
1418
+ else:
1419
+ raise NotImplementedError()
1420
+
1421
+ return out
1422
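+ # Dispatch summary for the branches above (shapes illustrative):
+ # None        -> diffusion_model(x, t)
+ # 'concat'    -> diffusion_model(cat([x, *c_concat], dim=1), t)
+ # 'crossattn' -> diffusion_model(x, t, context=cat(c_crossattn, dim=1))
+ # 'hybrid'    -> channel-concat input plus cross-attention context
+ # 'adm'       -> diffusion_model(x, t, y=c_crossattn[0])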
+
1423
+
1424
+ class Layout2ImgDiffusion(LatentDiffusion):
1425
+ # TODO: move all layout-specific hacks to this class
1426
+ def __init__(self, cond_stage_key, *args, **kwargs):
1427
+ assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
1428
+ super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
1429
+
1430
+ def log_images(self, batch, N=8, *args, **kwargs):
1431
+ logs = super().log_images(batch=batch, N=N, *args, **kwargs)
1432
+
1433
+ key = 'train' if self.training else 'validation'
1434
+ dset = self.trainer.datamodule.datasets[key]
1435
+ mapper = dset.conditional_builders[self.cond_stage_key]
1436
+
1437
+ bbox_imgs = []
1438
+ map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
1439
+ for tknzd_bbox in batch[self.cond_stage_key][:N]:
1440
+ bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
1441
+ bbox_imgs.append(bboximg)
1442
+
1443
+ cond_img = torch.stack(bbox_imgs, dim=0)
1444
+ logs['bbox_image'] = cond_img
1445
+ return logs
stable-diffusion/ldm/models/diffusion/dpm_solver/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .sampler import DPMSolverSampler
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (228 Bytes).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (228 Bytes).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-38.pyc ADDED
Binary file (51.4 kB).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/dpm_solver.cpython-39.pyc ADDED
Binary file (51.4 kB).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-38.pyc ADDED
Binary file (2.65 kB).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/__pycache__/sampler.cpython-39.pyc ADDED
Binary file (2.65 kB).
 
stable-diffusion/ldm/models/diffusion/dpm_solver/dpm_solver.py ADDED
@@ -0,0 +1,1184 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import math
4
+
5
+
6
+ class NoiseScheduleVP:
7
+ def __init__(
8
+ self,
9
+ schedule='discrete',
10
+ betas=None,
11
+ alphas_cumprod=None,
12
+ continuous_beta_0=0.1,
13
+ continuous_beta_1=20.,
14
+ ):
15
+ """Create a wrapper class for the forward SDE (VP type).
16
+
17
+ ***
18
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
20
+ ***
21
+
22
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
23
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
24
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
25
+
26
+ log_alpha_t = self.marginal_log_mean_coeff(t)
27
+ sigma_t = self.marginal_std(t)
28
+ lambda_t = self.marginal_lambda(t)
29
+
30
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
31
+
32
+ t = self.inverse_lambda(lambda_t)
33
+
34
+ ===============================================================
35
+
36
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
37
+
38
+ 1. For discrete-time DPMs:
39
+
40
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
41
+ t_i = (i + 1) / N
42
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
43
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
44
+
45
+ Args:
46
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
47
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
48
+
49
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
50
+
51
+ **Important**: Please pay special attention for the args for `alphas_cumprod`:
52
+ The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
53
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
54
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
55
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
56
+ and
57
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
58
+
59
+
60
+ 2. For continuous-time DPMs:
61
+
62
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
63
+ schedule are the default settings in DDPM and improved-DDPM:
64
+
65
+ Args:
66
+ beta_min: A `float` number. The smallest beta for the linear schedule.
67
+ beta_max: A `float` number. The largest beta for the linear schedule.
68
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
69
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
70
+ T: A `float` number. The ending time of the forward process.
71
+
72
+ ===============================================================
73
+
74
+ Args:
75
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
76
+ 'linear' or 'cosine' for continuous-time DPMs.
77
+ Returns:
78
+ A wrapper object of the forward SDE (VP type).
79
+
80
+ ===============================================================
81
+
82
+ Example:
83
+
84
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
85
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
86
+
87
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
88
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
89
+
90
+ # For continuous-time DPMs (VPSDE), linear schedule:
91
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
92
+
93
+ """
94
+
95
+ if schedule not in ['discrete', 'linear', 'cosine']:
96
+ raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
97
+
98
+ self.schedule = schedule
99
+ if schedule == 'discrete':
100
+ if betas is not None:
101
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
102
+ else:
103
+ assert alphas_cumprod is not None
104
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
105
+ self.total_N = len(log_alphas)
106
+ self.T = 1.
107
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
108
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
109
+ else:
110
+ self.total_N = 1000
111
+ self.beta_0 = continuous_beta_0
112
+ self.beta_1 = continuous_beta_1
113
+ self.cosine_s = 0.008
114
+ self.cosine_beta_max = 999.
115
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
116
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
117
+ self.schedule = schedule
118
+ if schedule == 'cosine':
119
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
120
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
121
+ self.T = 0.9946
122
+ else:
123
+ self.T = 1.
124
+
125
+ def marginal_log_mean_coeff(self, t):
126
+ """
127
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
128
+ """
129
+ if self.schedule == 'discrete':
130
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
131
+ elif self.schedule == 'linear':
132
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
133
+ elif self.schedule == 'cosine':
134
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
135
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
136
+ return log_alpha_t
137
+
138
+ def marginal_alpha(self, t):
139
+ """
140
+ Compute alpha_t of a given continuous-time label t in [0, T].
141
+ """
142
+ return torch.exp(self.marginal_log_mean_coeff(t))
143
+
144
+ def marginal_std(self, t):
145
+ """
146
+ Compute sigma_t of a given continuous-time label t in [0, T].
147
+ """
148
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
149
+
150
+ def marginal_lambda(self, t):
151
+ """
152
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
153
+ """
154
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
155
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
156
+ return log_mean_coeff - log_std
157
+
158
+ def inverse_lambda(self, lamb):
159
+ """
160
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
161
+ """
162
+ if self.schedule == 'linear':
163
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
164
+ Delta = self.beta_0**2 + tmp
165
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
166
+ elif self.schedule == 'discrete':
167
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
168
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
169
+ return t.reshape((-1,))
170
+ else:
171
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
172
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
173
+ t = t_fn(log_alpha)
174
+ return t
175
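+ # Consistency sketch: inverse_lambda inverts marginal_lambda. For a discrete
+ # schedule (with `betas` as an illustrative placeholder tensor):
+ # >>> ns = NoiseScheduleVP('discrete', betas=betas)
+ # >>> t = torch.tensor([0.5])
+ # >>> ns.inverse_lambda(ns.marginal_lambda(t))  # approximately t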
+
176
+
177
+ def model_wrapper(
178
+ model,
179
+ noise_schedule,
180
+ model_type="noise",
181
+ model_kwargs={},
182
+ guidance_type="uncond",
183
+ condition=None,
184
+ unconditional_condition=None,
185
+ guidance_scale=1.,
186
+ classifier_fn=None,
187
+ classifier_kwargs={},
188
+ ):
189
+ """Create a wrapper function for the noise prediction model.
190
+
191
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
192
+ firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
193
+
194
+ We support four types of the diffusion model by setting `model_type`:
195
+
196
+ 1. "noise": noise prediction model. (Trained by predicting noise).
197
+
198
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
199
+
200
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
201
+ The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
202
+
203
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
204
+ arXiv preprint arXiv:2202.00512 (2022).
205
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
206
+ arXiv preprint arXiv:2210.02303 (2022).
207
+
208
+ 4. "score": marginal score function. (Trained by denoising score matching).
209
+ Note that the score function and the noise prediction model follow a simple relationship:
210
+ ```
211
+ noise(x_t, t) = -sigma_t * score(x_t, t)
212
+ ```
213
+
214
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
215
+ 1. "uncond": unconditional sampling by DPMs.
216
+ The input `model` has the following format:
217
+ ``
218
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
219
+ ``
220
+
221
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
222
+ The input `model` has the following format:
223
+ ``
224
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
225
+ ``
226
+
227
+ The input `classifier_fn` has the following format:
228
+ ``
229
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
230
+ ``
231
+
232
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
233
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
234
+
235
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
236
+ The input `model` has the following format:
237
+ ``
238
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
239
+ ``
240
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
241
+
242
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
243
+ arXiv preprint arXiv:2207.12598 (2022).
244
+
245
+
246
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
247
+ or continuous-time labels (i.e. epsilon to T).
248
+
249
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
250
+ ``
251
+ def model_fn(x, t_continuous) -> noise:
252
+ t_input = get_model_input_time(t_continuous)
253
+ return noise_pred(model, x, t_input, **model_kwargs)
254
+ ``
255
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
256
+
257
+ ===============================================================
258
+
259
+ Args:
260
+ model: A diffusion model with the corresponding format described above.
261
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
262
+ model_type: A `str`. The parameterization type of the diffusion model.
263
+ "noise" or "x_start" or "v" or "score".
264
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
265
+ guidance_type: A `str`. The type of the guidance for sampling.
266
+ "uncond" or "classifier" or "classifier-free".
267
+ condition: A pytorch tensor. The condition for the guided sampling.
268
+ Only used for "classifier" or "classifier-free" guidance type.
269
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
270
+ Only used for "classifier-free" guidance type.
271
+ guidance_scale: A `float`. The scale for the guided sampling.
272
+ classifier_fn: A classifier function. Only used for the classifier guidance.
273
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
274
+ Returns:
275
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
276
+ """
277
+
278
+ def get_model_input_time(t_continuous):
279
+ """
280
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
281
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
282
+ For continuous-time DPMs, we just use `t_continuous`.
283
+ """
284
+ if noise_schedule.schedule == 'discrete':
285
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
286
+ else:
287
+ return t_continuous
288
+
289
+ def noise_pred_fn(x, t_continuous, cond=None):
290
+ if t_continuous.reshape((-1,)).shape[0] == 1:
291
+ t_continuous = t_continuous.expand((x.shape[0]))
292
+ t_input = get_model_input_time(t_continuous)
293
+ if cond is None:
294
+ output = model(x, t_input, **model_kwargs)
295
+ else:
296
+ output = model(x, t_input, cond, **model_kwargs)
297
+ if model_type == "noise":
298
+ return output
299
+ elif model_type == "x_start":
300
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
301
+ dims = x.dim()
302
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
303
+ elif model_type == "v":
304
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
305
+ dims = x.dim()
306
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
307
+ elif model_type == "score":
308
+ sigma_t = noise_schedule.marginal_std(t_continuous)
309
+ dims = x.dim()
310
+ return -expand_dims(sigma_t, dims) * output
311
+
312
+ def cond_grad_fn(x, t_input):
313
+ """
314
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
315
+ """
316
+ with torch.enable_grad():
317
+ x_in = x.detach().requires_grad_(True)
318
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
319
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
320
+
321
+ def model_fn(x, t_continuous):
322
+ """
323
+ The noise prediction model function that is used for DPM-Solver.
324
+ """
325
+ if t_continuous.reshape((-1,)).shape[0] == 1:
326
+ t_continuous = t_continuous.expand((x.shape[0]))
327
+ if guidance_type == "uncond":
328
+ return noise_pred_fn(x, t_continuous)
329
+ elif guidance_type == "classifier":
330
+ assert classifier_fn is not None
331
+ t_input = get_model_input_time(t_continuous)
332
+ cond_grad = cond_grad_fn(x, t_input)
333
+ sigma_t = noise_schedule.marginal_std(t_continuous)
334
+ noise = noise_pred_fn(x, t_continuous)
335
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
336
+ elif guidance_type == "classifier-free":
337
+ if guidance_scale == 1. or unconditional_condition is None:
338
+ return noise_pred_fn(x, t_continuous, cond=condition)
339
+ else:
340
+ x_in = torch.cat([x] * 2)
341
+ t_in = torch.cat([t_continuous] * 2)
342
+ c_in = torch.cat([unconditional_condition, condition])
343
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
344
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
345
+
346
+ assert model_type in ["noise", "x_start", "v", "score"]  # all four parameterizations documented above
347
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
348
+ return model_fn
349
+
350
+
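A hedged usage sketch for `model_wrapper` with classifier-free guidance (the dummy model, embedding shapes, and schedule below are illustrative assumptions, not part of this file):

    import torch
    import torch.nn as nn
    from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, model_wrapper

    class DummyEps(nn.Module):
        # Stand-in for a conditional noise predictor: model(x, t_input, cond) -> eps.
        def forward(self, x, t_input, cond):
            return torch.zeros_like(x)

    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    model_fn = model_wrapper(
        DummyEps(), ns,
        model_type="noise",
        guidance_type="classifier-free",
        condition=torch.randn(4, 77, 768),                # e.g. prompt embeddings
        unconditional_condition=torch.randn(4, 77, 768),  # e.g. empty-prompt embeddings
        guidance_scale=7.5,
    )
    eps = model_fn(torch.randn(4, 3, 32, 32), torch.full((4,), 0.5))  # continuous t in (0, 1]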
351
+ class DPM_Solver:
352
+ def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
353
+ """Construct a DPM-Solver.
354
+
355
+ We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
356
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
357
+ If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
358
+ In that case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
359
+ The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
360
+
361
+ Args:
362
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
363
+ ``
364
+ def model_fn(x, t_continuous):
365
+ return noise
366
+ ``
367
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
368
+ predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
369
+ thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
370
+ max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
371
+
372
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Raphael Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022.
373
+ """
374
+ self.model = model_fn
375
+ self.noise_schedule = noise_schedule
376
+ self.predict_x0 = predict_x0
377
+ self.thresholding = thresholding
378
+ self.max_val = max_val
379
+
380
+ def noise_prediction_fn(self, x, t):
381
+ """
382
+ Return the noise prediction model.
383
+ """
384
+ return self.model(x, t)
385
+
386
+ def data_prediction_fn(self, x, t):
387
+ """
388
+ Return the data prediction model (with thresholding).
389
+ """
390
+ noise = self.noise_prediction_fn(x, t)
391
+ dims = x.dim()
392
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
393
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
394
+ if self.thresholding:
395
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
396
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
397
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
398
+ x0 = torch.clamp(x0, -s, s) / s
399
+ return x0
400
+
401
+ def model_fn(self, x, t):
402
+ """
403
+ Convert the model to the noise prediction model or the data prediction model.
404
+ """
405
+ if self.predict_x0:
406
+ return self.data_prediction_fn(x, t)
407
+ else:
408
+ return self.noise_prediction_fn(x, t)
409
+
410
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
411
+ """Compute the intermediate time steps for sampling.
412
+
413
+ Args:
414
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
415
+ - 'logSNR': uniform logSNR for the time steps.
416
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
417
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
418
+ t_T: A `float`. The starting time of the sampling (default is T).
419
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
420
+ N: An `int`. The total number of time step intervals.
421
+ device: A torch device.
422
+ Returns:
423
+ A pytorch tensor of the time steps, with the shape (N + 1,).
424
+ """
425
+ if skip_type == 'logSNR':
426
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
427
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
428
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
429
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
430
+ elif skip_type == 'time_uniform':
431
+ return torch.linspace(t_T, t_0, N + 1).to(device)
432
+ elif skip_type == 'time_quadratic':
433
+ t_order = 2
434
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
435
+ return t
436
+ else:
437
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
438
+
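An illustrative comparison of the three `skip_type` spacings (placeholder schedule; the dummy `model_fn` just satisfies the constructor):

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, DPM_Solver

    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    solver = DPM_Solver(lambda x, t: torch.zeros_like(x), ns)

    for skip_type in ('time_uniform', 'time_quadratic', 'logSNR'):
        ts = solver.get_time_steps(skip_type, t_T=1.0, t_0=1e-3, N=5, device='cpu')
        print(skip_type, ts)  # N + 1 = 6 times, decreasing from t_T to t_0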
439
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
440
+ """
441
+ Get the order of each step for sampling by the singlestep DPM-Solver.
442
+
443
+ We combine DPM-Solver-1, 2, and 3 to use up all the function evaluations; the combination is named "DPM-Solver-fast".
444
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
445
+ - If order == 1:
446
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
447
+ - If order == 2:
448
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
449
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
450
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
451
+ - If order == 3:
452
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
453
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
454
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
455
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
456
+
457
+ ============================================
458
+ Args:
459
+ order: An `int`. The max order for the solver (2 or 3).
460
+ steps: An `int`. The total number of function evaluations (NFE).
461
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
462
+ - 'logSNR': uniform logSNR for the time steps.
463
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
464
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
465
+ t_T: A `float`. The starting time of the sampling (default is T).
466
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
467
+ device: A torch device.
468
+ Returns:
469
+ timesteps_outer: A pytorch tensor of the outer time steps (one per sub-solver, plus t_0).
+ orders: A list of the solver order of each step.
470
+ """
471
+ if order == 3:
472
+ K = steps // 3 + 1
473
+ if steps % 3 == 0:
474
+ orders = [3,] * (K - 2) + [2, 1]
475
+ elif steps % 3 == 1:
476
+ orders = [3,] * (K - 1) + [1]
477
+ else:
478
+ orders = [3,] * (K - 1) + [2]
479
+ elif order == 2:
480
+ if steps % 2 == 0:
481
+ K = steps // 2
482
+ orders = [2,] * K
483
+ else:
484
+ K = steps // 2 + 1
485
+ orders = [2,] * (K - 1) + [1]
486
+ elif order == 1:
487
+ K = 1
488
+ orders = [1,] * steps
489
+ else:
490
+ raise ValueError("'order' must be '1' or '2' or '3'.")
491
+ if skip_type == 'logSNR':
492
+ # To reproduce the results in DPM-Solver paper
493
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
494
+ else:
495
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), dim=0).to(device)]  # dim is required by torch.cumsum
496
+ return timesteps_outer, orders
497
+
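A small worked example of the order-splitting rule above (illustrative; placeholder schedule). With steps=20 and order=3 we get K = 20 // 3 + 1 = 7 and 20 % 3 == 2, hence six DPM-Solver-3 steps plus one DPM-Solver-2 step:

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, DPM_Solver

    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    solver = DPM_Solver(lambda x, t: torch.zeros_like(x), ns)
    ts, orders = solver.get_orders_and_timesteps_for_singlestep_solver(
        steps=20, order=3, skip_type='time_uniform', t_T=1.0, t_0=1e-3, device='cpu')
    assert orders == [3] * 6 + [2]          # 6 * 3 + 2 = 20 NFE
    assert ts.shape[0] == len(orders) + 1   # one outer time step per sub-solver, plus t_0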
498
+ def denoise_to_zero_fn(self, x, s):
499
+ """
500
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
501
+ """
502
+ return self.data_prediction_fn(x, s)
503
+
504
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
505
+ """
506
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
507
+
508
+ Args:
509
+ x: A pytorch tensor. The initial value at time `s`.
510
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
511
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
512
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
513
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
514
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
515
+ Returns:
516
+ x_t: A pytorch tensor. The approximated solution at time `t`.
517
+ """
518
+ ns = self.noise_schedule
519
+ dims = x.dim()
520
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
521
+ h = lambda_t - lambda_s
522
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
523
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
524
+ alpha_t = torch.exp(log_alpha_t)
525
+
526
+ if self.predict_x0:
527
+ phi_1 = torch.expm1(-h)
528
+ if model_s is None:
529
+ model_s = self.model_fn(x, s)
530
+ x_t = (
531
+ expand_dims(sigma_t / sigma_s, dims) * x
532
+ - expand_dims(alpha_t * phi_1, dims) * model_s
533
+ )
534
+ if return_intermediate:
535
+ return x_t, {'model_s': model_s}
536
+ else:
537
+ return x_t
538
+ else:
539
+ phi_1 = torch.expm1(h)
540
+ if model_s is None:
541
+ model_s = self.model_fn(x, s)
542
+ x_t = (
543
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
544
+ - expand_dims(sigma_t * phi_1, dims) * model_s
545
+ )
546
+ if return_intermediate:
547
+ return x_t, {'model_s': model_s}
548
+ else:
549
+ return x_t
550
+
551
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'):
552
+ """
553
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
554
+
555
+ Args:
556
+ x: A pytorch tensor. The initial value at time `s`.
557
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
558
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
559
+ r1: A `float`. The hyperparameter of the second-order solver.
560
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
561
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
562
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
563
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
564
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
565
+ Returns:
566
+ x_t: A pytorch tensor. The approximated solution at time `t`.
567
+ """
568
+ if solver_type not in ['dpm_solver', 'taylor']:
569
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
570
+ if r1 is None:
571
+ r1 = 0.5
572
+ ns = self.noise_schedule
573
+ dims = x.dim()
574
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
575
+ h = lambda_t - lambda_s
576
+ lambda_s1 = lambda_s + r1 * h
577
+ s1 = ns.inverse_lambda(lambda_s1)
578
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)
579
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
580
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
581
+
582
+ if self.predict_x0:
583
+ phi_11 = torch.expm1(-r1 * h)
584
+ phi_1 = torch.expm1(-h)
585
+
586
+ if model_s is None:
587
+ model_s = self.model_fn(x, s)
588
+ x_s1 = (
589
+ expand_dims(sigma_s1 / sigma_s, dims) * x
590
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
591
+ )
592
+ model_s1 = self.model_fn(x_s1, s1)
593
+ if solver_type == 'dpm_solver':
594
+ x_t = (
595
+ expand_dims(sigma_t / sigma_s, dims) * x
596
+ - expand_dims(alpha_t * phi_1, dims) * model_s
597
+ - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
598
+ )
599
+ elif solver_type == 'taylor':
600
+ x_t = (
601
+ expand_dims(sigma_t / sigma_s, dims) * x
602
+ - expand_dims(alpha_t * phi_1, dims) * model_s
603
+ + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s)
604
+ )
605
+ else:
606
+ phi_11 = torch.expm1(r1 * h)
607
+ phi_1 = torch.expm1(h)
608
+
609
+ if model_s is None:
610
+ model_s = self.model_fn(x, s)
611
+ x_s1 = (
612
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
613
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
614
+ )
615
+ model_s1 = self.model_fn(x_s1, s1)
616
+ if solver_type == 'dpm_solver':
617
+ x_t = (
618
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
619
+ - expand_dims(sigma_t * phi_1, dims) * model_s
620
+ - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
621
+ )
622
+ elif solver_type == 'taylor':
623
+ x_t = (
624
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
625
+ - expand_dims(sigma_t * phi_1, dims) * model_s
626
+ - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
627
+ )
628
+ if return_intermediate:
629
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
630
+ else:
631
+ return x_t
632
+
633
+ def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'):
634
+ """
635
+ Singlestep solver DPM-Solver-3 from time `s` to time `t`.
636
+
637
+ Args:
638
+ x: A pytorch tensor. The initial value at time `s`.
639
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
640
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
641
+ r1: A `float`. The hyperparameter of the third-order solver.
642
+ r2: A `float`. The hyperparameter of the third-order solver.
643
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
644
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
645
+ model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
646
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
647
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
648
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
649
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
650
+ Returns:
651
+ x_t: A pytorch tensor. The approximated solution at time `t`.
652
+ """
653
+ if solver_type not in ['dpm_solver', 'taylor']:
654
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
655
+ if r1 is None:
656
+ r1 = 1. / 3.
657
+ if r2 is None:
658
+ r2 = 2. / 3.
659
+ ns = self.noise_schedule
660
+ dims = x.dim()
661
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
662
+ h = lambda_t - lambda_s
663
+ lambda_s1 = lambda_s + r1 * h
664
+ lambda_s2 = lambda_s + r2 * h
665
+ s1 = ns.inverse_lambda(lambda_s1)
666
+ s2 = ns.inverse_lambda(lambda_s2)
667
+ log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
668
+ sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)
669
+ alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
670
+
671
+ if self.predict_x0:
672
+ phi_11 = torch.expm1(-r1 * h)
673
+ phi_12 = torch.expm1(-r2 * h)
674
+ phi_1 = torch.expm1(-h)
675
+ phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
676
+ phi_2 = phi_1 / h + 1.
677
+ phi_3 = phi_2 / h - 0.5
678
+
679
+ if model_s is None:
680
+ model_s = self.model_fn(x, s)
681
+ if model_s1 is None:
682
+ x_s1 = (
683
+ expand_dims(sigma_s1 / sigma_s, dims) * x
684
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
685
+ )
686
+ model_s1 = self.model_fn(x_s1, s1)
687
+ x_s2 = (
688
+ expand_dims(sigma_s2 / sigma_s, dims) * x
689
+ - expand_dims(alpha_s2 * phi_12, dims) * model_s
690
+ + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
691
+ )
692
+ model_s2 = self.model_fn(x_s2, s2)
693
+ if solver_type == 'dpm_solver':
694
+ x_t = (
695
+ expand_dims(sigma_t / sigma_s, dims) * x
696
+ - expand_dims(alpha_t * phi_1, dims) * model_s
697
+ + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
698
+ )
699
+ elif solver_type == 'taylor':
700
+ D1_0 = (1. / r1) * (model_s1 - model_s)
701
+ D1_1 = (1. / r2) * (model_s2 - model_s)
702
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
703
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
704
+ x_t = (
705
+ expand_dims(sigma_t / sigma_s, dims) * x
706
+ - expand_dims(alpha_t * phi_1, dims) * model_s
707
+ + expand_dims(alpha_t * phi_2, dims) * D1
708
+ - expand_dims(alpha_t * phi_3, dims) * D2
709
+ )
710
+ else:
711
+ phi_11 = torch.expm1(r1 * h)
712
+ phi_12 = torch.expm1(r2 * h)
713
+ phi_1 = torch.expm1(h)
714
+ phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
715
+ phi_2 = phi_1 / h - 1.
716
+ phi_3 = phi_2 / h - 0.5
717
+
718
+ if model_s is None:
719
+ model_s = self.model_fn(x, s)
720
+ if model_s1 is None:
721
+ x_s1 = (
722
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
723
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
724
+ )
725
+ model_s1 = self.model_fn(x_s1, s1)
726
+ x_s2 = (
727
+ expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
728
+ - expand_dims(sigma_s2 * phi_12, dims) * model_s
729
+ - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
730
+ )
731
+ model_s2 = self.model_fn(x_s2, s2)
732
+ if solver_type == 'dpm_solver':
733
+ x_t = (
734
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
735
+ - expand_dims(sigma_t * phi_1, dims) * model_s
736
+ - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
737
+ )
738
+ elif solver_type == 'taylor':
739
+ D1_0 = (1. / r1) * (model_s1 - model_s)
740
+ D1_1 = (1. / r2) * (model_s2 - model_s)
741
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
742
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
743
+ x_t = (
744
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
745
+ - expand_dims(sigma_t * phi_1, dims) * model_s
746
+ - expand_dims(sigma_t * phi_2, dims) * D1
747
+ - expand_dims(sigma_t * phi_3, dims) * D2
748
+ )
749
+
750
+ if return_intermediate:
751
+ return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
752
+ else:
753
+ return x_t
754
+
755
+ def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
756
+ """
757
+ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
758
+
759
+ Args:
760
+ x: A pytorch tensor. The initial value at time `s`.
761
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
762
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
763
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
764
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
765
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
766
+ Returns:
767
+ x_t: A pytorch tensor. The approximated solution at time `t`.
768
+ """
769
+ if solver_type not in ['dpm_solver', 'taylor']:
770
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
771
+ ns = self.noise_schedule
772
+ dims = x.dim()
773
+ model_prev_1, model_prev_0 = model_prev_list
774
+ t_prev_1, t_prev_0 = t_prev_list
775
+ lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
776
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
777
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
778
+ alpha_t = torch.exp(log_alpha_t)
779
+
780
+ h_0 = lambda_prev_0 - lambda_prev_1
781
+ h = lambda_t - lambda_prev_0
782
+ r0 = h_0 / h
783
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
784
+ if self.predict_x0:
785
+ if solver_type == 'dpm_solver':
786
+ x_t = (
787
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
788
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
789
+ - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
790
+ )
791
+ elif solver_type == 'taylor':
792
+ x_t = (
793
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
794
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
795
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
796
+ )
797
+ else:
798
+ if solver_type == 'dpm_solver':
799
+ x_t = (
800
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
801
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
802
+ - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
803
+ )
804
+ elif solver_type == 'taylor':
805
+ x_t = (
806
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
807
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
808
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
809
+ )
810
+ return x_t
811
+
812
+ def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
813
+ """
814
+ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
815
+
816
+ Args:
817
+ x: A pytorch tensor. The initial value at time `s`.
818
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
819
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
820
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
821
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
822
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
823
+ Returns:
824
+ x_t: A pytorch tensor. The approximated solution at time `t`.
825
+ """
826
+ ns = self.noise_schedule
827
+ dims = x.dim()
828
+ model_prev_2, model_prev_1, model_prev_0 = model_prev_list
829
+ t_prev_2, t_prev_1, t_prev_0 = t_prev_list
830
+ lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
831
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
832
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
833
+ alpha_t = torch.exp(log_alpha_t)
834
+
835
+ h_1 = lambda_prev_1 - lambda_prev_2
836
+ h_0 = lambda_prev_0 - lambda_prev_1
837
+ h = lambda_t - lambda_prev_0
838
+ r0, r1 = h_0 / h, h_1 / h
839
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
840
+ D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
841
+ D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
842
+ D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
843
+ if self.predict_x0:
844
+ x_t = (
845
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
846
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
847
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
848
+ - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2
849
+ )
850
+ else:
851
+ x_t = (
852
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
853
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
854
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
855
+ - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2
856
+ )
857
+ return x_t
858
+
859
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None):
860
+ """
861
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
862
+
863
+ Args:
864
+ x: A pytorch tensor. The initial value at time `s`.
865
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
866
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
867
+ order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
868
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
869
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
870
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
871
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
872
+ r2: A `float`. The hyperparameter of the third-order solver.
873
+ Returns:
874
+ x_t: A pytorch tensor. The approximated solution at time `t`.
875
+ """
876
+ if order == 1:
877
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
878
+ elif order == 2:
879
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
880
+ elif order == 3:
881
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
882
+ else:
883
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
884
+
885
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
886
+ """
887
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
888
+
889
+ Args:
890
+ x: A pytorch tensor. The initial value at time `s`.
891
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
892
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
893
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
894
+ order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
895
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
896
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
897
+ Returns:
898
+ x_t: A pytorch tensor. The approximated solution at time `t`.
899
+ """
900
+ if order == 1:
901
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
902
+ elif order == 2:
903
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
904
+ elif order == 3:
905
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
906
+ else:
907
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
908
+
909
+ def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'):
910
+ """
911
+ The adaptive step size solver based on singlestep DPM-Solver.
912
+
913
+ Args:
914
+ x: A pytorch tensor. The initial value at time `t_T`.
915
+ order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
916
+ t_T: A `float`. The starting time of the sampling (default is T).
917
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
918
+ h_init: A `float`. The initial step size (for logSNR).
919
+ atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
920
+ rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
921
+ theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
922
+ t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
923
+ current time and `t_0` is less than `t_err`. The default setting is 1e-5.
924
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
925
+ The type slightly impacts the performance. We recommend the 'dpm_solver' type.
926
+ Returns:
927
+ x_0: A pytorch tensor. The approximated solution at time `t_0`.
928
+
929
+ [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
930
+ """
931
+ ns = self.noise_schedule
932
+ s = t_T * torch.ones((x.shape[0],)).to(x)
933
+ lambda_s = ns.marginal_lambda(s)
934
+ lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
935
+ h = h_init * torch.ones_like(s).to(x)
936
+ x_prev = x
937
+ nfe = 0
938
+ if order == 2:
939
+ r1 = 0.5
940
+ lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
941
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)
942
+ elif order == 3:
943
+ r1, r2 = 1. / 3., 2. / 3.
944
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)
945
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)
946
+ else:
947
+ raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
948
+ while torch.abs((s - t_0)).mean() > t_err:
949
+ t = ns.inverse_lambda(lambda_s + h)
950
+ x_lower, lower_noise_kwargs = lower_update(x, s, t)
951
+ x_higher = higher_update(x, s, t, **lower_noise_kwargs)
952
+ delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
953
+ norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
954
+ E = norm_fn((x_higher - x_lower) / delta).max()
955
+ if torch.all(E <= 1.):
956
+ x = x_higher
957
+ s = t
958
+ x_prev = x_lower
959
+ lambda_s = ns.marginal_lambda(s)
960
+ h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
961
+ nfe += order
962
+ print('adaptive solver nfe', nfe)
963
+ return x
964
+
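A hedged sketch of invoking the adaptive solver through the `sample` entry point defined below (dummy model and placeholder schedule; `steps` is ignored when method='adaptive'):

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, DPM_Solver

    betas = torch.linspace(1e-4, 2e-2, 1000)
    ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1. - betas, dim=0))
    solver = DPM_Solver(lambda x, t: torch.zeros_like(x), ns, predict_x0=True)
    x_T = torch.randn(2, 3, 8, 8)
    x_0 = solver.sample(x_T, order=2, method='adaptive', atol=0.0078, rtol=0.05)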
965
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
966
+ method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
967
+ atol=0.0078, rtol=0.05,
968
+ ):
969
+ """
970
+ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
971
+
972
+ =====================================================
973
+
974
+ We support the following algorithms for both noise prediction model and data prediction model:
975
+ - 'singlestep':
976
+ Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
977
+ We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
978
+ The total number of function evaluations (NFE) == `steps`.
979
+ Given a fixed NFE == `steps`, the sampling procedure is:
980
+ - If `order` == 1:
981
+ - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
982
+ - If `order` == 2:
983
+ - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
984
+ - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
985
+ - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
986
+ - If `order` == 3:
987
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
988
+ - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
989
+ - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
990
+ - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
991
+ - 'multistep':
992
+ Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
993
+ We initialize the first `order` values by lower order multistep solvers.
994
+ Given a fixed NFE == `steps`, the sampling procedure is:
995
+ Denote K = steps.
996
+ - If `order` == 1:
997
+ - We use K steps of DPM-Solver-1 (i.e. DDIM).
998
+ - If `order` == 2:
999
+ - We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
1000
+ - If `order` == 3:
1001
+ - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
1002
+ - 'singlestep_fixed':
1003
+ Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
1004
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
1005
+ - 'adaptive':
1006
+ Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
1007
+ We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
1008
+ You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
1009
+ (NFE) and the sample quality.
1010
+ - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
1011
+ - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
1012
+
1013
+ =====================================================
1014
+
1015
+ Some advice on choosing the algorithm:
1016
+ - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
1017
+ Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
1018
+ e.g.
1019
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
1020
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1021
+ skip_type='time_uniform', method='singlestep')
1022
+ - For **guided sampling with large guidance scale** by DPMs:
1023
+ Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
1024
+ e.g.
1025
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
1026
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
1027
+ skip_type='time_uniform', method='multistep')
1028
+
1029
+ We support three types of `skip_type`:
1030
+ - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
1031
+ - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1032
+ - 'time_quadratic': quadratic time for the time steps.
1033
+
1034
+ =====================================================
1035
+ Args:
1036
+ x: A pytorch tensor. The initial value at time `t_start`
1037
+ e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1038
+ steps: An `int`. The total number of function evaluations (NFE).
1039
+ t_start: A `float`. The starting time of the sampling.
1040
+ If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1041
+ t_end: A `float`. The ending time of the sampling.
1042
+ If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1043
+ e.g. if total_N == 1000, we have `t_end` == 1e-3.
1044
+ For discrete-time DPMs:
1045
+ - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1046
+ For continuous-time DPMs:
1047
+ - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1048
+ order: An `int`. The order of DPM-Solver.
1049
+ skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1050
+ method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1051
+ denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1052
+ Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1053
+
1054
+ This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1055
+ score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when
1056
+ sampling diffusion models by diffusion SDEs for low-resolution images
1057
+ (such as CIFAR-10). However, we observed that it does not matter for
1058
+ high-resolution images. As it needs an additional NFE, we do not recommend
1059
+ it for high-resolution images.
1060
+ lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1061
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that
1062
+ this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1063
+ (especially for steps <= 10). So we recommend setting it to `True`.
1064
+ solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1065
+ atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1066
+ rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1067
+ Returns:
1068
+ x_end: A pytorch tensor. The approximated solution at time `t_end`.
1069
+
1070
+ """
1071
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1072
+ t_T = self.noise_schedule.T if t_start is None else t_start
1073
+ device = x.device
1074
+ if method == 'adaptive':
1075
+ with torch.no_grad():
1076
+ x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
1077
+ elif method == 'multistep':
1078
+ assert steps >= order
1079
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1080
+ assert timesteps.shape[0] - 1 == steps
1081
+ with torch.no_grad():
1082
+ vec_t = timesteps[0].expand((x.shape[0]))
1083
+ model_prev_list = [self.model_fn(x, vec_t)]
1084
+ t_prev_list = [vec_t]
1085
+ # Init the first `order` values by lower order multistep DPM-Solver.
1086
+ for init_order in range(1, order):
1087
+ vec_t = timesteps[init_order].expand(x.shape[0])
1088
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type)
1089
+ model_prev_list.append(self.model_fn(x, vec_t))
1090
+ t_prev_list.append(vec_t)
1091
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
1092
+ for step in range(order, steps + 1):
1093
+ vec_t = timesteps[step].expand(x.shape[0])
1094
+ if lower_order_final and steps < 15:
1095
+ step_order = min(order, steps + 1 - step)
1096
+ else:
1097
+ step_order = order
1098
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type)
1099
+ for i in range(order - 1):
1100
+ t_prev_list[i] = t_prev_list[i + 1]
1101
+ model_prev_list[i] = model_prev_list[i + 1]
1102
+ t_prev_list[-1] = vec_t
1103
+ # We do not need to evaluate the final model value.
1104
+ if step < steps:
1105
+ model_prev_list[-1] = self.model_fn(x, vec_t)
1106
+ elif method in ['singlestep', 'singlestep_fixed']:
1107
+ if method == 'singlestep':
1108
+ timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
1109
+ elif method == 'singlestep_fixed':
1110
+ K = steps // order
1111
+ orders = [order,] * K
1112
+ timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1113
+ for i, order in enumerate(orders):
1114
+ t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1115
+ timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device)
1116
+ lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1117
+ vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
1118
+ h = lambda_inner[-1] - lambda_inner[0]
1119
+ r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1120
+ r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1121
+ x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1122
+ if denoise_to_zero:
1123
+ x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1124
+ return x
1125
+
1126
+
1127
+
1128
+ #############################################################
1129
+ # other utility functions
1130
+ #############################################################
1131
+
1132
+ def interpolate_fn(x, xp, yp):
1133
+ """
1134
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
1135
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
1136
+ The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
1137
+
1138
+ Args:
1139
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1140
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1141
+ yp: PyTorch tensor with shape [C, K].
1142
+ Returns:
1143
+ The function values f(x), with shape [N, C].
1144
+ """
1145
+ N, K = x.shape[0], xp.shape[1]
1146
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1147
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1148
+ x_idx = torch.argmin(x_indices, dim=2)
1149
+ cand_start_idx = x_idx - 1
1150
+ start_idx = torch.where(
1151
+ torch.eq(x_idx, 0),
1152
+ torch.tensor(1, device=x.device),
1153
+ torch.where(
1154
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1155
+ ),
1156
+ )
1157
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1158
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1159
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1160
+ start_idx2 = torch.where(
1161
+ torch.eq(x_idx, 0),
1162
+ torch.tensor(0, device=x.device),
1163
+ torch.where(
1164
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1165
+ ),
1166
+ )
1167
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1168
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1169
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1170
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1171
+ return cand
1172
+
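A quick sanity check of the interpolation (illustrative):

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import interpolate_fn

    xp = torch.tensor([[0., 1., 2.]])        # keypoint x-values, shape [C=1, K=3]
    yp = torch.tensor([[0., 10., 20.]])      # keypoint y-values, shape [C=1, K=3]
    x = torch.tensor([[0.5], [1.5], [3.0]])  # queries, shape [N=3, C=1]
    print(interpolate_fn(x, xp, yp))         # [[5.], [15.], [30.]]; x=3.0 extrapolates linearly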
1173
+
1174
+ def expand_dims(v, dims):
1175
+ """
1176
+ Expand the tensor `v` to `dims` total dimensions.
1177
+
1178
+ Args:
1179
+ `v`: a PyTorch tensor with shape [N].
1180
+ `dims`: an `int`.
1181
+ Returns:
1182
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1183
+ """
1184
+ return v[(...,) + (None,)*(dims - 1)]
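For example (illustrative):

    import torch
    from ldm.models.diffusion.dpm_solver.dpm_solver import expand_dims

    v = torch.ones(4)
    assert expand_dims(v, 4).shape == (4, 1, 1, 1)  # ready to broadcast against [N, C, H, W]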
stable-diffusion/ldm/models/diffusion/dpm_solver/sampler.py ADDED
@@ -0,0 +1,82 @@
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+
5
+ from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
6
+
7
+
8
+ class DPMSolverSampler(object):
9
+ def __init__(self, model, **kwargs):
10
+ super().__init__()
11
+ self.model = model
12
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
13
+ self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
14
+
15
+ def register_buffer(self, name, attr):
16
+ if type(attr) == torch.Tensor:
17
+ if attr.device != torch.device("cuda"):
18
+ attr = attr.to(torch.device("cuda"))
19
+ setattr(self, name, attr)
20
+
21
+ @torch.no_grad()
22
+ def sample(self,
23
+ S,
24
+ batch_size,
25
+ shape,
26
+ conditioning=None,
27
+ callback=None,
28
+ normals_sequence=None,
29
+ img_callback=None,
30
+ quantize_x0=False,
31
+ eta=0.,
32
+ mask=None,
33
+ x0=None,
34
+ temperature=1.,
35
+ noise_dropout=0.,
36
+ score_corrector=None,
37
+ corrector_kwargs=None,
38
+ verbose=True,
39
+ x_T=None,
40
+ log_every_t=100,
41
+ unconditional_guidance_scale=1.,
42
+ unconditional_conditioning=None,
43
+ # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
44
+ **kwargs
45
+ ):
46
+ if conditioning is not None:
47
+ if isinstance(conditioning, dict):
48
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
49
+ if cbs != batch_size:
50
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
51
+ else:
52
+ if conditioning.shape[0] != batch_size:
53
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
54
+
55
+ # sampling
56
+ C, H, W = shape
57
+ size = (batch_size, C, H, W)
58
+
59
+ # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')
60
+
61
+ device = self.model.betas.device
62
+ if x_T is None:
63
+ img = torch.randn(size, device=device)
64
+ else:
65
+ img = x_T
66
+
67
+ ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
68
+
69
+ model_fn = model_wrapper(
70
+ lambda x, t, c: self.model.apply_model(x, t, c),
71
+ ns,
72
+ model_type="noise",
73
+ guidance_type="classifier-free",
74
+ condition=conditioning,
75
+ unconditional_condition=unconditional_conditioning,
76
+ guidance_scale=unconditional_guidance_scale,
77
+ )
78
+
79
+ dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
80
+ x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)
81
+
82
+ return x.to(device), None
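A hedged usage sketch (assumes a loaded LatentDiffusion `model` exposing `apply_model`, `alphas_cumprod`, `betas`, and `device`; `c` and `uc` stand for conditional and unconditional embeddings and are placeholders):

    sampler = DPMSolverSampler(model)
    samples, _ = sampler.sample(S=20, batch_size=4, shape=(4, 64, 64),
                                conditioning=c,
                                unconditional_conditioning=uc,
                                unconditional_guidance_scale=7.5)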
stable-diffusion/ldm/models/diffusion/plms.py ADDED
@@ -0,0 +1,236 @@
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ from functools import partial
7
+
8
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
9
+
10
+
11
+ class PLMSSampler(object):
12
+ def __init__(self, model, schedule="linear", **kwargs):
13
+ super().__init__()
14
+ self.model = model
15
+ self.ddpm_num_timesteps = model.num_timesteps
16
+ self.schedule = schedule
17
+
18
+ def register_buffer(self, name, attr):
19
+ if type(attr) == torch.Tensor:
20
+ if attr.device != torch.device("cuda"):
21
+ attr = attr.to(torch.device("cuda"))
22
+ setattr(self, name, attr)
23
+
24
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
25
+ if ddim_eta != 0:
26
+ raise ValueError('ddim_eta must be 0 for PLMS')
27
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
28
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
29
+ alphas_cumprod = self.model.alphas_cumprod
30
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
31
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
32
+
33
+ self.register_buffer('betas', to_torch(self.model.betas))
34
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
35
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
36
+
37
+ # calculations for diffusion q(x_t | x_{t-1}) and others
38
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
39
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
40
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
41
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
42
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
43
+
44
+ # ddim sampling parameters
45
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
46
+ ddim_timesteps=self.ddim_timesteps,
47
+ eta=ddim_eta,verbose=verbose)
48
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
49
+ self.register_buffer('ddim_alphas', ddim_alphas)
50
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
51
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
52
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
53
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
54
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
55
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
56
+
57
+ @torch.no_grad()
58
+ def sample(self,
59
+ S,
60
+ batch_size,
61
+ shape,
62
+ conditioning=None,
63
+ callback=None,
64
+ normals_sequence=None,
65
+ img_callback=None,
66
+ quantize_x0=False,
67
+ eta=0.,
68
+ mask=None,
69
+ x0=None,
70
+ temperature=1.,
71
+ noise_dropout=0.,
72
+ score_corrector=None,
73
+ corrector_kwargs=None,
74
+ verbose=True,
75
+ x_T=None,
76
+ log_every_t=100,
77
+ unconditional_guidance_scale=1.,
78
+ unconditional_conditioning=None,
79
+ # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
80
+ **kwargs
81
+ ):
82
+ if conditioning is not None:
83
+ if isinstance(conditioning, dict):
84
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
85
+ if cbs != batch_size:
86
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
87
+ else:
88
+ if conditioning.shape[0] != batch_size:
89
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
90
+
91
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
92
+ # sampling
93
+ C, H, W = shape
94
+ size = (batch_size, C, H, W)
95
+ print(f'Data shape for PLMS sampling is {size}')
96
+
97
+ samples, intermediates = self.plms_sampling(conditioning, size,
98
+ callback=callback,
99
+ img_callback=img_callback,
100
+ quantize_denoised=quantize_x0,
101
+ mask=mask, x0=x0,
102
+ ddim_use_original_steps=False,
103
+ noise_dropout=noise_dropout,
104
+ temperature=temperature,
105
+ score_corrector=score_corrector,
106
+ corrector_kwargs=corrector_kwargs,
107
+ x_T=x_T,
108
+ log_every_t=log_every_t,
109
+ unconditional_guidance_scale=unconditional_guidance_scale,
110
+ unconditional_conditioning=unconditional_conditioning,
111
+ )
112
+ return samples, intermediates
113
+
114
+ @torch.no_grad()
115
+ def plms_sampling(self, cond, shape,
116
+ x_T=None, ddim_use_original_steps=False,
117
+ callback=None, timesteps=None, quantize_denoised=False,
118
+ mask=None, x0=None, img_callback=None, log_every_t=100,
119
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
120
+ unconditional_guidance_scale=1., unconditional_conditioning=None,):
121
+ device = self.model.betas.device
122
+ b = shape[0]
123
+ if x_T is None:
124
+ img = torch.randn(shape, device=device)
125
+ else:
126
+ img = x_T
127
+
128
+ if timesteps is None:
129
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
130
+ elif timesteps is not None and not ddim_use_original_steps:
131
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
132
+ timesteps = self.ddim_timesteps[:subset_end]
+
+        intermediates = {'x_inter': [img], 'pred_x0': [img]}
+        time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)
+        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
+        print(f"Running PLMS Sampling with {total_steps} timesteps")
+
+        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
+        old_eps = []
+
+        for i, step in enumerate(iterator):
+            index = total_steps - i - 1
+            ts = torch.full((b,), step, device=device, dtype=torch.long)
+            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
+
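+            # optional inpainting-style blend: the known region of x0 is re-noised to the
+            # current timestep and pasted in where mask == 1, so only the region with
+            # mask == 0 continues to be denoised freely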
+            if mask is not None:
+                assert x0 is not None
+                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
+                img = img_orig * mask + (1. - mask) * img
+
+            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
+                                      quantize_denoised=quantize_denoised, temperature=temperature,
+                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
+                                      corrector_kwargs=corrector_kwargs,
+                                      unconditional_guidance_scale=unconditional_guidance_scale,
+                                      unconditional_conditioning=unconditional_conditioning,
+                                      old_eps=old_eps, t_next=ts_next)
+            img, pred_x0, e_t = outs
+            old_eps.append(e_t)
+            if len(old_eps) >= 4:
+                old_eps.pop(0)
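+            # keep at most the three most recent noise predictions; together with the
+            # current e_t they form the 4-term history needed by the 4th-order
+            # linear-multistep update in p_sample_plms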
163
+ if callback: callback(i)
164
+ if img_callback: img_callback(pred_x0, i)
165
+
166
+ if index % log_every_t == 0 or index == total_steps - 1:
167
+ intermediates['x_inter'].append(img)
168
+ intermediates['pred_x0'].append(pred_x0)
169
+
170
+ return img, intermediates
171
+
172
+ @torch.no_grad()
173
+ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
174
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
175
+ unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
176
+ b, *_, device = *x.shape, x.device
177
+
178
+ def get_model_output(x, t):
179
+ if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
180
+ e_t = self.model.apply_model(x, t, c)
181
+ else:
182
+ x_in = torch.cat([x] * 2)
183
+ t_in = torch.cat([t] * 2)
184
+ c_in = torch.cat([unconditional_conditioning, c])
185
+ e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
186
+ e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
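+                # classifier-free guidance (Ho & Salimans, 2022): run the model once on a
+                # doubled [uncond, cond] batch and extrapolate away from the unconditional
+                # prediction by the guidance scale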
+
+            if score_corrector is not None:
+                assert self.model.parameterization == "eps"
+                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
+
+            return e_t
+
+        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
+        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
+        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
+        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
+
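+        # The helper below applies the DDIM update (Song et al., 2021, Eq. 12), which is
+        # deterministic for eta = 0:
+        #   x_prev = sqrt(a_prev) * pred_x0 + sqrt(1 - a_prev - sigma_t^2) * e_t + sigma_t * z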
+        def get_x_prev_and_pred_x0(e_t, index):
+            # select parameters corresponding to the currently considered timestep
+            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
+            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
+            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
+            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
+
+            # current prediction for x_0
+            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+            if quantize_denoised:
+                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
+            # direction pointing to x_t
+            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
+            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+            if noise_dropout > 0.:
+                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
+            return x_prev, pred_x0
+
+        e_t = get_model_output(x, t)
+        if len(old_eps) == 0:
+            # Pseudo Improved Euler (2nd order)
+            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+            e_t_next = get_model_output(x_prev, t_next)
+            e_t_prime = (e_t + e_t_next) / 2
+        elif len(old_eps) == 1:
+            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (3 * e_t - old_eps[-1]) / 2
+        elif len(old_eps) == 2:
+            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+        elif len(old_eps) >= 3:
+            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
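+        # the 1/2, (3, -1)/2, (23, -16, 5)/12 and (55, -59, 37, -9)/24 weights are the
+        # standard explicit Adams-Bashforth coefficients, applied to noise predictions
+        # as in the PNDM paper (Liu et al., 2022)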
+
+        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+        return x_prev, pred_x0, e_t