HongFangzhou commited on
Commit
b13bb81
1 Parent(s): 27ef5eb

test 3dtopia

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. 3DTopia/.gitignore +4 -0
  3. 3DTopia/LICENSE +201 -0
  4. 3DTopia/README.md +65 -0
  5. 3DTopia/configs/default.yaml +72 -0
  6. 3DTopia/environment.yml +150 -0
  7. 3DTopia/gradio_demo.py +334 -0
  8. 3DTopia/ldm/data/__init__.py +0 -0
  9. 3DTopia/ldm/data/base.py +23 -0
  10. 3DTopia/ldm/data/imagenet.py +394 -0
  11. 3DTopia/ldm/data/lsun.py +92 -0
  12. 3DTopia/ldm/lr_scheduler.py +98 -0
  13. 3DTopia/ldm/models/autoencoder.py +443 -0
  14. 3DTopia/ldm/models/diffusion/__init__.py +0 -0
  15. 3DTopia/ldm/models/diffusion/classifier.py +267 -0
  16. 3DTopia/ldm/models/diffusion/ddim.py +241 -0
  17. 3DTopia/ldm/models/diffusion/ddpm.py +1746 -0
  18. 3DTopia/ldm/models/diffusion/ddpm_preprocess.py +1716 -0
  19. 3DTopia/ldm/models/diffusion/dpm_solver/__init__.py +1 -0
  20. 3DTopia/ldm/models/diffusion/dpm_solver/dpm_solver.py +1184 -0
  21. 3DTopia/ldm/models/diffusion/dpm_solver/sampler.py +82 -0
  22. 3DTopia/ldm/models/diffusion/plms.py +236 -0
  23. 3DTopia/ldm/modules/attention.py +261 -0
  24. 3DTopia/ldm/modules/diffusionmodules/__init__.py +0 -0
  25. 3DTopia/ldm/modules/diffusionmodules/model.py +835 -0
  26. 3DTopia/ldm/modules/diffusionmodules/openaimodel.py +965 -0
  27. 3DTopia/ldm/modules/diffusionmodules/triplane_3daware_unet.py +991 -0
  28. 3DTopia/ldm/modules/diffusionmodules/triplane_context_crossattention_unet.py +1126 -0
  29. 3DTopia/ldm/modules/diffusionmodules/triplane_crossattention_unet.py +1058 -0
  30. 3DTopia/ldm/modules/diffusionmodules/util.py +305 -0
  31. 3DTopia/ldm/modules/distributions/__init__.py +0 -0
  32. 3DTopia/ldm/modules/distributions/distributions.py +92 -0
  33. 3DTopia/ldm/modules/ema.py +76 -0
  34. 3DTopia/ldm/modules/encoders/__init__.py +0 -0
  35. 3DTopia/ldm/modules/encoders/modules.py +386 -0
  36. 3DTopia/ldm/modules/image_degradation/__init__.py +2 -0
  37. 3DTopia/ldm/modules/image_degradation/bsrgan.py +730 -0
  38. 3DTopia/ldm/modules/image_degradation/bsrgan_light.py +650 -0
  39. 3DTopia/ldm/modules/image_degradation/utils/test.png +0 -0
  40. 3DTopia/ldm/modules/image_degradation/utils_image.py +916 -0
  41. 3DTopia/ldm/modules/losses/__init__.py +1 -0
  42. 3DTopia/ldm/modules/losses/contperceptual.py +111 -0
  43. 3DTopia/ldm/modules/losses/vqperceptual.py +167 -0
  44. 3DTopia/ldm/modules/x_transformer.py +641 -0
  45. 3DTopia/ldm/util.py +203 -0
  46. 3DTopia/model/auto_regressive.py +412 -0
  47. 3DTopia/model/sv_vae_triplane.py +111 -0
  48. 3DTopia/model/triplane_vae.py +0 -0
  49. 3DTopia/model/triplane_vqvae.py +418 -0
  50. 3DTopia/module/model_2d.py +2206 -0
.gitattributes CHANGED
@@ -25,6 +25,7 @@
25
  *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
28
  *.tflite filter=lfs diff=lfs merge=lfs -text
29
  *.tgz filter=lfs diff=lfs merge=lfs -text
30
  *.wasm filter=lfs diff=lfs merge=lfs -text
25
  *.safetensors filter=lfs diff=lfs merge=lfs -text
26
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
  *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
  *.tflite filter=lfs diff=lfs merge=lfs -text
30
  *.tgz filter=lfs diff=lfs merge=lfs -text
31
  *.wasm filter=lfs diff=lfs merge=lfs -text
3DTopia/.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
1
+ __pycache__
2
+ checkpoints
3
+ results
4
+ tmp
3DTopia/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
3DTopia/README.md ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <p align="center">
2
+ <picture>
3
+ <img alt="logo" src="assets/3dtopia.jpeg" width="20%">
4
+ </picture>
5
+ </p>
6
+ <div align="center">
7
+ <h1>3DTopia</h1>
8
+ A two-stage text-to-3D generation model. The first stage uses diffusion model to quickly generate candidates. The second stage refines the assets chosen from the first stage.
9
+
10
+ https://github.com/3DTopia/3DTopia/assets/23376858/c9716cf0-6e61-4983-82b2-2e8f579bd46c
11
+
12
+ </div>
13
+
14
+ ## News
15
+
16
+ [2024/01/18] We release a text-to-3D model 3DTopia!
17
+
18
+ ## 1. Quick Start
19
+
20
+ ### 1.1 Install Environment for this Repository
21
+ We recommend using Anaconda to manage the environment.
22
+ ```bash
23
+ conda env create -f environment.yml
24
+ ```
25
+
26
+ ### 1.2 Install Second Stage Refiner
27
+ Please refer to [threefiner](https://github.com/3DTopia/threefiner) to install our second stage mesh refiner. We have tested installing both environments together with Pytorch 1.12.0 and CUDA 11.3.
28
+
29
+ ### 1.3 Download Checkpoints \[Optional\]
30
+ We have implemented automatic checkpoint download for both `gradio_demo.py` and `sample_stage1.py`. If you prefer to download manually, you may download checkpoint `3dtopia_diffusion_state_dict.ckpt` or `model.safetensors` from [huggingface](https://huggingface.co/hongfz16/3DTopia).
31
+
32
+ ### Q&A
33
+ - If you encounter this error in the second stage `ImportError: /lib64/libc.so.6: version 'GLIBC_2.25' not found`, try to install a lower version of pymeshlab by `pip install pymeshlab==0.2`.
34
+
35
+ ## 2. Inference
36
+
37
+ ### 2.1 First Stage
38
+ Run the following command to sample `a robot` as the first stage. Results will be located under the folder `results`.
39
+ ```bash
40
+ python -u sample_stage1.py --text "a robot" --samples 1 --sampler ddim --steps 200 --cfg_scale 7.5 --seed 0
41
+ ```
42
+
43
+ Arguments:
44
+ - `--ckpt` specifies checkpoint file path;
45
+ - `--test_folder` controls which subfolder to put all the results;
46
+ - `--seed` will fix random seeds; `--sampler` can be set to `ddim` for DDIM sampling (By default, we use 1000 steps DDPM sampling);
47
+ - `--steps` controls sampling steps only for DDIM;
48
+ - `--samples` controls number of samples;
49
+ - `--text` is the input text;
50
+ - `--no_video` and `--no_mcubes` suppress rendering multi-view videos and marching cubes, which are by-default enabled;
51
+ - `--mcubes_res` controls the resolution of the 3D volumn sampled for marching cubes; One can lower this resolution to save graphics memory;
52
+ - `--render_res` controls the resolution of the rendered video;
53
+
54
+ ### 2.2 Second Stage
55
+ There are two steps as the second stage refinement. Here is a simple example. Please refer to [threefiner](https://github.com/3DTopia/threefiner) for more detailed usage.
56
+ ```bash
57
+ # step 1
58
+ threefiner sd --mesh results/default/stage1/a_robot_0_0.ply --prompt "a robot" --text_dir --front_dir='-y' --outdir results/default/stage2/ --save a_robot_0_0_sd.glb
59
+ # step 2
60
+ threefiner if2 --mesh results/default/stage2/a_robot_0_0_sd.glb --prompt "a robot" --outdir results/default/stage2/ --save a_robot_0_0_if2.glb
61
+ ```
62
+ The resulting mesh can be found at `results/default/stage2/a_robot_0_0_if2.glb`
63
+
64
+ ## 3. Acknowledgement
65
+ We thank the community for building and open-sourcing the foundation of this work. Specifically, we want to thank [EG3D](https://github.com/NVlabs/eg3d), [Stable Diffusion](https://github.com/CompVis/stable-diffusion) for their codes. We also want to thank [Objaverse](https://objaverse.allenai.org) for the wonderful dataset.
3DTopia/configs/default.yaml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ shift_scale: 2
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "triplane"
11
+ cond_stage_key: "caption"
12
+ image_size: 32
13
+ channels: 8
14
+ cond_stage_trainable: false
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.5147210212065061
18
+ use_ema: False
19
+ learning_rate: 5e-5
20
+
21
+ unet_config:
22
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23
+ params:
24
+ image_size: 32
25
+ in_channels: 8
26
+ out_channels: 8
27
+ model_channels: 320
28
+ attention_resolutions: [4, 2, 1]
29
+ num_res_blocks: 2
30
+ channel_mult: [ 1, 2, 4, 4 ]
31
+ num_heads: 8
32
+ use_spatial_transformer: True
33
+ context_dim: 768
34
+ transformer_depth: 1
35
+ use_checkpoint: True
36
+ legacy: False
37
+
38
+ first_stage_config:
39
+ target: model.triplane_vae.AutoencoderKLRollOut
40
+ params:
41
+ embed_dim: 8
42
+ learning_rate: 1e-5
43
+ norm: False
44
+ renderer_type: eg3d
45
+ ddconfig:
46
+ double_z: true
47
+ z_channels: 8
48
+ resolution: 256
49
+ in_channels: 32
50
+ out_ch: 32
51
+ ch: 128
52
+ ch_mult:
53
+ - 2
54
+ - 4
55
+ - 4
56
+ - 8
57
+ num_res_blocks: 2
58
+ attn_resolutions: [32]
59
+ dropout: 0.0
60
+ lossconfig:
61
+ kl_weight: 1e-5
62
+ rec_weight: 1
63
+ latent_tv_weight: 2e-3
64
+ renderer_config:
65
+ rgbnet_dim: -1
66
+ rgbnet_width: 128
67
+ sigma_dim: 12
68
+ c_dim: 20
69
+
70
+ cond_stage_config:
71
+ target: ldm.modules.encoders.modules.FrozenCLIPTextEmbedder
72
+
3DTopia/environment.yml ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: 3dtopia
2
+ channels:
3
+ - pytorch
4
+ - anaconda
5
+ - conda-forge
6
+ - defaults
7
+ dependencies:
8
+ - _libgcc_mutex=0.1=main
9
+ - _openmp_mutex=5.1=1_gnu
10
+ - blas=1.0=mkl
11
+ - brotli=1.0.9=h166bdaf_7
12
+ - brotli-bin=1.0.9=h166bdaf_7
13
+ - bzip2=1.0.8=h7f98852_4
14
+ - ca-certificates=2023.5.7=hbcca054_0
15
+ - certifi=2023.5.7=pyhd8ed1ab_0
16
+ - charset-normalizer=3.1.0=pyhd8ed1ab_0
17
+ - colorama=0.4.6=pyhd8ed1ab_0
18
+ - cudatoolkit=11.3.1=h9edb442_10
19
+ - ffmpeg=4.3.2=hca11adc_0
20
+ - freetype=2.10.4=h0708190_1
21
+ - fsspec=2023.5.0=pyh1a96a4e_0
22
+ - gmp=6.2.1=h58526e2_0
23
+ - gnutls=3.6.13=h85f3911_1
24
+ - idna=3.4=pyhd8ed1ab_0
25
+ - intel-openmp=2021.4.0=h06a4308_3561
26
+ - jpeg=9e=h166bdaf_1
27
+ - lame=3.100=h7f98852_1001
28
+ - lcms2=2.12=hddcbb42_0
29
+ - ld_impl_linux-64=2.38=h1181459_1
30
+ - libbrotlicommon=1.0.9=h166bdaf_7
31
+ - libbrotlidec=1.0.9=h166bdaf_7
32
+ - libbrotlienc=1.0.9=h166bdaf_7
33
+ - libffi=3.4.4=h6a678d5_0
34
+ - libgcc-ng=11.2.0=h1234567_1
35
+ - libgomp=11.2.0=h1234567_1
36
+ - libpng=1.6.37=h21135ba_2
37
+ - libstdcxx-ng=11.2.0=h1234567_1
38
+ - libtiff=4.2.0=hecacb30_2
39
+ - libwebp-base=1.2.2=h7f98852_1
40
+ - lightning-utilities=0.8.0=pyhd8ed1ab_0
41
+ - lz4-c=1.9.3=h9c3ff4c_1
42
+ - mkl=2021.4.0=h06a4308_640
43
+ - mkl-service=2.4.0=py38h95df7f1_0
44
+ - mkl_fft=1.3.1=py38h8666266_1
45
+ - mkl_random=1.2.2=py38h1abd341_0
46
+ - ncurses=6.4=h6a678d5_0
47
+ - nettle=3.6=he412f7d_0
48
+ - numpy=1.24.3=py38h14f4228_0
49
+ - numpy-base=1.24.3=py38h31eccc5_0
50
+ - olefile=0.46=pyh9f0ad1d_1
51
+ - openh264=2.1.1=h780b84a_0
52
+ - openjpeg=2.4.0=hb52868f_1
53
+ - openssl=1.1.1u=h7f8727e_0
54
+ - packaging=23.1=pyhd8ed1ab_0
55
+ - pip=23.0.1=py38h06a4308_0
56
+ - pixman-cos6-x86_64=0.32.8=4
57
+ - pysocks=1.7.1=pyha2e5f31_6
58
+ - python=3.8.16=h7a1cb2a_3
59
+ - python_abi=3.8=2_cp38
60
+ - pytorch=1.12.0=py3.8_cuda11.3_cudnn8.3.2_0
61
+ - pytorch-lightning=2.0.2=pyhd8ed1ab_0
62
+ - pytorch-mutex=1.0=cuda
63
+ - pyyaml=6.0=py38h0a891b7_4
64
+ - readline=8.2=h5eee18b_0
65
+ - requests=2.31.0=pyhd8ed1ab_0
66
+ - setuptools=67.8.0=py38h06a4308_0
67
+ - six=1.16.0=pyh6c4a22f_0
68
+ - sqlite=3.41.2=h5eee18b_0
69
+ - tk=8.6.12=h1ccaba5_0
70
+ - torchaudio=0.12.0=py38_cu113
71
+ - torchmetrics=0.11.4=pyhd8ed1ab_0
72
+ - torchvision=0.13.0=py38_cu113
73
+ - tqdm=4.65.0=pyhd8ed1ab_1
74
+ - typing_extensions=4.6.3=pyha770c72_0
75
+ - urllib3=2.0.2=pyhd8ed1ab_0
76
+ - wheel=0.38.4=py38h06a4308_0
77
+ - x264=1!161.3030=h7f98852_1
78
+ - xorg-x11-server-common-cos6-x86_64=1.17.4=4
79
+ - xorg-x11-server-xvfb-cos6-x86_64=1.17.4=4
80
+ - xz=5.4.2=h5eee18b_0
81
+ - yaml=0.2.5=h7f98852_2
82
+ - zlib=1.2.13=h5eee18b_0
83
+ - zstd=1.5.2=ha4553b6_0
84
+ - pip:
85
+ - antlr4-python3-runtime==4.9.3
86
+ - appdirs==1.4.4
87
+ - asttokens==2.4.0
88
+ - av==10.0.0
89
+ - backcall==0.2.0
90
+ - click==8.1.3
91
+ - git+https://github.com/openai/CLIP.git
92
+ - contourpy==1.1.1
93
+ - cycler==0.12.1
94
+ - decorator==5.1.1
95
+ - docker-pycreds==0.4.0
96
+ - einops==0.6.1
97
+ - executing==1.2.0
98
+ - filelock==3.12.2
99
+ - fonttools==4.43.1
100
+ - ftfy==6.1.1
101
+ - gitdb==4.0.10
102
+ - gitpython==3.1.31
103
+ - huggingface-hub==0.16.4
104
+ - imageio==2.31.0
105
+ - imageio-ffmpeg==0.4.8
106
+ - importlib-resources==6.1.0
107
+ - ipdb==0.13.13
108
+ - ipython==8.12.2
109
+ - jedi==0.19.0
110
+ - kiwisolver==1.4.5
111
+ - kornia==0.6.0
112
+ - lpips==0.1.4
113
+ - matplotlib==3.7.3
114
+ - matplotlib-inline==0.1.6
115
+ - omegaconf==2.3.0
116
+ - open-clip-torch==2.20.0
117
+ - opencv-python==4.7.0.72
118
+ - parso==0.8.3
119
+ - pathtools==0.1.2
120
+ - pexpect==4.8.0
121
+ - pickleshare==0.7.5
122
+ - pillow==9.5.0
123
+ - prompt-toolkit==3.0.39
124
+ - protobuf==3.20.3
125
+ - psutil==5.9.5
126
+ - ptyprocess==0.7.0
127
+ - pure-eval==0.2.2
128
+ - pygments==2.16.1
129
+ - pymcubes==0.1.4
130
+ - pyparsing==3.1.1
131
+ - pytorch-fid==0.3.0
132
+ - pytorch-msssim==1.0.0
133
+ - regex==2023.6.3
134
+ - safetensors==0.3.3
135
+ - scipy==1.10.1
136
+ - sentencepiece==0.1.99
137
+ - sentry-sdk==1.25.0
138
+ - setproctitle==1.3.2
139
+ - smmap==5.0.0
140
+ - stack-data==0.6.2
141
+ - timm==0.9.7
142
+ - tokenizers==0.12.1
143
+ - tomli==2.0.1
144
+ - traitlets==5.9.0
145
+ - transformers
146
+ - trimesh==4.0.2
147
+ - vit-pytorch==1.2.2
148
+ - wandb==0.15.3
149
+ - wcwidth==0.2.6
150
+ - zipp==3.17.0
3DTopia/gradio_demo.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ import time
4
+ import json
5
+ import torch
6
+ import mcubes
7
+ import trimesh
8
+ import datetime
9
+ import argparse
10
+ import subprocess
11
+ import numpy as np
12
+ import gradio as gr
13
+ from tqdm import tqdm
14
+ import imageio.v2 as imageio
15
+ import pytorch_lightning as pl
16
+ from omegaconf import OmegaConf
17
+
18
+ from ldm.models.diffusion.ddim import DDIMSampler
19
+ from ldm.models.diffusion.plms import PLMSSampler
20
+ from ldm.models.diffusion.dpm_solver import DPMSolverSampler
21
+
22
+ from utility.initialize import instantiate_from_config, get_obj_from_str
23
+ from utility.triplane_renderer.eg3d_renderer import sample_from_planes, generate_planes
24
+ from utility.triplane_renderer.renderer import get_rays, to8b
25
+ from safetensors.torch import load_file
26
+ from huggingface_hub import hf_hub_download
27
+
28
+ import warnings
29
+ warnings.filterwarnings("ignore", category=UserWarning)
30
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
31
+
32
+ def add_text(rgb, caption):
33
+ font = cv2.FONT_HERSHEY_SIMPLEX
34
+ # org
35
+ gap = 10
36
+ org = (gap, gap)
37
+ # fontScale
38
+ fontScale = 0.3
39
+ # Blue color in BGR
40
+ color = (255, 0, 0)
41
+ # Line thickness of 2 px
42
+ thickness = 1
43
+ break_caption = []
44
+ for i in range(len(caption) // 30 + 1):
45
+ break_caption_i = caption[i*30:(i+1)*30]
46
+ break_caption.append(break_caption_i)
47
+ for i, bci in enumerate(break_caption):
48
+ cv2.putText(rgb, bci, (gap, gap*(i+1)), font, fontScale, color, thickness, cv2.LINE_AA)
49
+ return rgb
50
+
51
+ config = "configs/default.yaml"
52
+ # ckpt = "checkpoints/3dtopia_diffusion_state_dict.ckpt"
53
+ ckpt = hf_hub_download(repo_id="hongfz16/3DTopia", filename="model.safetensors")
54
+ configs = OmegaConf.load(config)
55
+ os.makedirs("tmp", exist_ok=True)
56
+
57
+ if ckpt.endswith(".ckpt"):
58
+ model = get_obj_from_str(configs.model["target"]).load_from_checkpoint(ckpt, map_location='cpu', strict=False, **configs.model.params)
59
+ elif ckpt.endswith(".safetensors"):
60
+ model = get_obj_from_str(configs.model["target"])(**configs.model.params)
61
+ model_ckpt = load_file(ckpt)
62
+ model.load_state_dict(model_ckpt)
63
+ else:
64
+ raise NotImplementedError
65
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
66
+ model = model.to(device)
67
+ sampler = DDIMSampler(model)
68
+
69
+ img_size = configs.model.params.unet_config.params.image_size
70
+ channels = configs.model.params.unet_config.params.in_channels
71
+ shape = [channels, img_size, img_size * 3]
72
+
73
+ pose_folder = 'assets/sample_data/pose'
74
+ poses_fname = sorted([os.path.join(pose_folder, f) for f in os.listdir(pose_folder)])
75
+ batch_rays_list = []
76
+ H = 128
77
+ ratio = 512 // H
78
+ for p in poses_fname:
79
+ c2w = np.loadtxt(p).reshape(4, 4)
80
+ c2w[:3, 3] *= 2.2
81
+ c2w = np.array([
82
+ [1, 0, 0, 0],
83
+ [0, 0, -1, 0],
84
+ [0, 1, 0, 0],
85
+ [0, 0, 0, 1]
86
+ ]) @ c2w
87
+
88
+ k = np.array([
89
+ [560 / ratio, 0, H * 0.5],
90
+ [0, 560 / ratio, H * 0.5],
91
+ [0, 0, 1]
92
+ ])
93
+
94
+ rays_o, rays_d = get_rays(H, H, torch.Tensor(k), torch.Tensor(c2w[:3, :4]))
95
+ coords = torch.stack(torch.meshgrid(torch.linspace(0, H-1, H), torch.linspace(0, H-1, H), indexing='ij'), -1)
96
+ coords = torch.reshape(coords, [-1,2]).long()
97
+ rays_o = rays_o[coords[:, 0], coords[:, 1]]
98
+ rays_d = rays_d[coords[:, 0], coords[:, 1]]
99
+ batch_rays = torch.stack([rays_o, rays_d], 0)
100
+ batch_rays_list.append(batch_rays)
101
+ batch_rays_list = torch.stack(batch_rays_list, 0)
102
+
103
def marching_cube(b, text, global_info):
    """Extract a colored mesh from the b-th decoded triplane and export it as .ply.

    Args:
        b: index of the sample within the decoded batch.
        text: prompt text; used (with spaces replaced) to name the output file.
        global_info: shared state dict; must contain 'decode_res', the decoded
            triplane features stored there by `infer`.

    Returns:
        Filesystem path of the exported .ply mesh.
    """
    # prepare volumn for marching cube: sample the density field on a
    # res^3 grid covering the cube [-1.2, 1.2]^3
    res = 128
    assert 'decode_res' in global_info
    decode_res = global_info['decode_res']
    c_list = torch.linspace(-1.2, 1.2, steps=res)
    grid_x, grid_y, grid_z = torch.meshgrid(
        c_list, c_list, c_list, indexing='ij'
    )
    coords = torch.stack([grid_x, grid_y, grid_z], -1).to(device)
    plane_axes = generate_planes()
    feats = sample_from_planes(
        plane_axes, decode_res[b:b+1].reshape(1, 3, -1, 256, 256), coords.reshape(1, -1, 3), padding_mode='zeros', box_warp=2.4
    )
    # the decoder requires a view direction, but density does not depend on
    # it, so a constant fake direction is sufficient here
    fake_dirs = torch.zeros_like(coords)
    fake_dirs[..., 0] = 1
    out = model.first_stage_model.triplane_decoder.decoder(feats, fake_dirs)
    u = out['sigma'].reshape(res, res, res).detach().cpu().numpy()
    del out

    # marching cube at density threshold 10, then map voxel indices back
    # to world coordinates in [-1.2, 1.2]^3
    vertices, triangles = mcubes.marching_cubes(u, 10)
    min_bound = np.array([-1.2, -1.2, -1.2])
    max_bound = np.array([1.2, 1.2, 1.2])
    vertices = vertices / (res - 1) * (max_bound - min_bound)[None, :] + min_bound[None, :]
    pt_vertices = torch.from_numpy(vertices).to(device)

    # extract vertices color: render the triplane from six axis-aligned
    # camera positions and keep, per vertex, the color from the view whose
    # rendered depth best matches the camera-to-vertex distance (i.e. the
    # least occluded view)
    res_triplane = 256
    render_kwargs = {
        'depth_resolution': 128,
        'disparity_space_sampling': False,
        'box_warp': 2.4,
        'depth_resolution_importance': 128,
        'clamp_mode': 'softplus',
        'white_back': True,
        'det': True
    }
    rays_o_list = [
        np.array([0, 0, 2]),
        np.array([0, 0, -2]),
        np.array([0, 2, 0]),
        np.array([0, -2, 0]),
        np.array([2, 0, 0]),
        np.array([-2, 0, 0]),
    ]
    rgb_final = None
    diff_final = None
    for rays_o in tqdm(rays_o_list):
        # one ray per mesh vertex, shot from the current camera position
        rays_o = torch.from_numpy(rays_o.reshape(1, 3)).repeat(vertices.shape[0], 1).float().to(device)
        rays_d = pt_vertices.reshape(-1, 3) - rays_o
        rays_d = rays_d / torch.norm(rays_d, dim=-1).reshape(-1, 1)
        dist = torch.norm(pt_vertices.reshape(-1, 3) - rays_o, dim=-1).cpu().numpy().reshape(-1)

        render_out = model.first_stage_model.triplane_decoder(
            decode_res[b:b+1].reshape(1, 3, -1, res_triplane, res_triplane),
            rays_o.unsqueeze(0), rays_d.unsqueeze(0), render_kwargs,
            whole_img=False, tvloss=False
        )
        rgb = render_out['rgb_marched'].reshape(-1, 3).detach().cpu().numpy()
        depth = render_out['depth_final'].reshape(-1).detach().cpu().numpy()
        depth_diff = np.abs(dist - depth)

        if rgb_final is None:
            rgb_final = rgb.copy()
            diff_final = depth_diff.copy()

        else:
            # prefer the view with the smaller depth discrepancy per vertex
            ind = diff_final > depth_diff
            rgb_final[ind] = rgb[ind]
            diff_final[ind] = depth_diff[ind]

    # bgr to rgb
    rgb_final = np.stack([
        rgb_final[:, 2], rgb_final[:, 1], rgb_final[:, 0]
    ], -1)

    # export to ply (timestamp in the name avoids collisions between runs)
    mesh = trimesh.Trimesh(vertices, triangles, vertex_colors=(rgb_final * 255).astype(np.uint8))
    path = os.path.join('tmp', f"{text.replace(' ', '_')}_{str(datetime.datetime.now()).replace(' ', '_')}.ply")
    trimesh.exchange.export.export_mesh(mesh, path, file_type='ply')

    del vertices, triangles, rgb_final
    torch.cuda.empty_cache()

    return path
def infer(prompt, samples, steps, scale, seed, global_info):
    """Stage 1: sample triplanes from the text-conditioned diffusion model
    and render a preview video showing up to four candidates in a 2x2 grid.

    Args:
        prompt: text prompt ('/' is stripped because it would break file paths).
        samples: number of candidates to generate (used as the batch size).
        steps: number of diffusion sampling steps.
        scale: classifier-free guidance scale.
        seed: RNG seed for reproducibility.
        global_info: shared state dict; the decoded triplanes are stored
            under 'decode_res' for later use by stage 2.

    Returns:
        (global_info, path) where path is the preview .mp4 file.
    """
    prompt = prompt.replace('/', '')
    pl.seed_everything(seed)
    batch_size = samples
    with torch.no_grad():
        noise = None
        c = model.get_learned_conditioning([prompt])
        # classifier-free guidance: a zero embedding serves as the
        # unconditional branch
        unconditional_c = torch.zeros_like(c)
        sample, _ = sampler.sample(
            S=steps,
            batch_size=batch_size,
            shape=shape,
            verbose=False,
            x_T = noise,
            conditioning = c.repeat(batch_size, 1, 1),
            unconditional_guidance_scale=scale,
            unconditional_conditioning=unconditional_c.repeat(batch_size, 1, 1)
        )
        decode_res = model.decode_first_stage(sample)

        big_video_list = []

        # hand the decoded triplanes to stage 2 via the shared state dict
        global_info['decode_res'] = decode_res

        for b in range(batch_size):
            def render_img(v):
                # render view v of candidate b, convert BGR -> RGB and
                # stamp the candidate index onto the frame
                rgb_sample, _ = model.first_stage_model.render_triplane_eg3d_decoder(
                    decode_res[b:b+1], batch_rays_list[v:v+1].to(device), torch.zeros(1, H, H, 3).to(device),
                )
                rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
                rgb_sample = np.stack(
                    [rgb_sample[..., 2], rgb_sample[..., 1], rgb_sample[..., 0]], -1
                )
                rgb_sample = add_text(rgb_sample, str(b))
                return rgb_sample

            # render every other view from the middle section of the orbit
            view_num = len(batch_rays_list)
            video_list = []
            for v in tqdm(range(view_num//8*3, view_num//8*5, 2)):
                rgb_sample = render_img(v)
                video_list.append(rgb_sample)
            big_video_list.append(video_list)

        # pad with white frames so the grid below is always 2x2
        for _ in range(4 - batch_size):
            big_video_list.append(
                [np.zeros_like(f) + 255 for f in big_video_list[0]]
            )
        cat_video_list = [
            np.concatenate([
                np.concatenate([big_video_list[0][i], big_video_list[1][i]], 1),
                np.concatenate([big_video_list[2][i], big_video_list[3][i]], 1),
            ], 0) \
            for i in range(len(big_video_list[0]))
        ]

        path = f"tmp/{prompt.replace(' ', '_')}_{str(datetime.datetime.now()).replace(' ', '_')}.mp4"
        imageio.mimwrite(path, np.stack(cat_video_list, 0))

        return global_info, path
def infer_stage2(prompt, selection, seed, global_info):
    """Stage 2: refine one stage-1 candidate with threefiner and render a
    preview video of the refined mesh with kire.

    Args:
        prompt: text prompt used for refinement and output file naming.
        selection: index (as string/int) of the stage-1 candidate to refine.
        seed: RNG seed (unused here; kept for interface symmetry with `infer`).
        global_info: shared state dict holding 'decode_res' from stage 1.

    Returns:
        (video_path, glb_path): the rendered preview .mp4 and refined .glb mesh.
    """
    prompt = prompt.replace('/', '')
    mesh_path = marching_cube(int(selection), prompt, global_info)
    mesh_name = mesh_path.split('/')[-1][:-4]

    # Pass arguments as a list with shell=False so that user-supplied prompt
    # text cannot be interpreted by the shell (command-injection hardening);
    # the previous shell-string form quoted the prompt but not robustly.
    if2_cmd = [
        "threefiner", "if2",
        "--mesh", mesh_path,
        "--prompt", prompt,
        "--outdir", "tmp",
        "--save", f"{mesh_name}_if2.glb",
        "--text_dir",
        "--front_dir=-y",
    ]
    print(' '.join(if2_cmd))
    subprocess.run(if2_cmd, check=False)
    torch.cuda.empty_cache()

    video_path = f"tmp/{prompt.replace(' ', '_')}_{str(datetime.datetime.now()).replace(' ', '_')}.mp4"
    render_cmd = [
        "kire", os.path.join('tmp', mesh_name + '_if2.glb'),
        "--save_video", video_path,
        "--wogui",
        "--force_cuda_rast",
        "--H", "256",
        "--W", "256",
    ]
    print(' '.join(render_cmd))
    subprocess.run(render_cmd, check=False)
    torch.cuda.empty_cache()

    return video_path, os.path.join('tmp', mesh_name + '_if2.glb')
+
289
# Build the Gradio UI: stage-1 text-to-3D generation on the left column,
# stage-2 refinement plus mesh download on the right column.
block = gr.Blocks()

with block:
    # shared per-session state used to pass decoded triplane features
    # from stage 1 (`infer`) to stage 2 (`infer_stage2`)
    global_info = gr.State(dict())
    with gr.Row():
        with gr.Column():
            with gr.Row():
                text = gr.Textbox(
                    label = "Enter your prompt",
                    max_lines = 1,
                    placeholder = "Enter your prompt",
                    container = False,
                )
                btn = gr.Button("Generate 3D")
            gallery = gr.Video(height=512)
            advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
            with gr.Row(elem_id="advanced-options"):
                samples = gr.Slider(label="Number of Samples", minimum=1, maximum=4, value=4, step=1)
                steps = gr.Slider(label="Steps", minimum=1, maximum=500, value=50, step=1)
                scale = gr.Slider(
                    label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
            # stage 1 fires on Enter in the textbox or on the button click
            gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed, global_info], outputs=[global_info, gallery])
            advanced_button.click(
                None,
                [],
                text,
            )
        with gr.Column():
            with gr.Row():
                dropdown = gr.Dropdown(
                    ['0', '1', '2', '3'], label="Choose a Candidate For Stage2", value='0'
                )
                btn_stage2 = gr.Button("Start Refinement")
            gallery = gr.Video(height=512)
            download = gr.File(label="Download Mesh", file_count="single", height=100)
            gr.on([btn_stage2.click], infer_stage2, inputs=[text, dropdown, seed, global_info], outputs=[gallery, download])

block.launch(share=True)
3DTopia/ldm/data/__init__.py ADDED
File without changes
3DTopia/ldm/data/base.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
3
+
4
+
5
class Txt2ImgIterableBaseDataset(IterableDataset):
    """Interface that makes IterableDatasets for text2img data chainable.

    Subclasses must implement `__iter__`; this base class only tracks the
    record count, the set of valid sample ids, and the target image size.
    """

    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.num_records = num_records
        self.valid_ids = valid_ids
        # start by sampling from all valid ids
        self.sample_ids = valid_ids
        self.size = size
        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        return self.num_records

    @abstractmethod
    def __iter__(self):
        pass
3DTopia/ldm/data/imagenet.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, yaml, pickle, shutil, tarfile, glob
2
+ import cv2
3
+ import albumentations
4
+ import PIL
5
+ import numpy as np
6
+ import torchvision.transforms.functional as TF
7
+ from omegaconf import OmegaConf
8
+ from functools import partial
9
+ from PIL import Image
10
+ from tqdm import tqdm
11
+ from torch.utils.data import Dataset, Subset
12
+
13
+ import taming.data.utils as tdu
14
+ from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
15
+ from taming.data.imagenet import ImagePaths
16
+
17
+ from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
18
+
19
+
20
def synset2idx(path_to_yaml="data/index_synset.yaml"):
    """Invert the index->synset mapping loaded from a YAML file.

    Args:
        path_to_yaml: path to a YAML file mapping class indices to synset ids.

    Returns:
        dict mapping synset id -> class index.
    """
    with open(path_to_yaml) as f:
        # yaml.load without an explicit Loader is unsafe on untrusted input
        # and raises TypeError on PyYAML >= 6; safe_load is correct for
        # plain data files like this one.
        di2s = yaml.safe_load(f)
    return dict((v,k) for k,v in di2s.items())
24
+
25
+
26
class ImageNetBase(Dataset):
    """Common base for the ImageNet datasets below.

    Subclasses implement `_prepare` (download/extraction and setting
    `self.root`, `self.datadir`, `self.txt_filelist`, `self.random_crop`);
    this class then fetches label metadata and builds `self.data` — either
    an `ImagePaths` loader or, when `process_images` is False, a plain list
    of file paths.
    """

    def __init__(self, config=None):
        self.config = config or OmegaConf.create()
        if not type(self.config)==dict:
            self.config = OmegaConf.to_container(self.config)
        self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
        self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._prepare_human_to_integer_label()
        self._load()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def _prepare(self):
        # subclasses must set self.root, self.datadir, self.txt_filelist
        # and self.random_crop before _load() runs
        raise NotImplementedError()

    def _filter_relpaths(self, relpaths):
        """Drop known-broken files and, when `sub_indices` is configured,
        keep only paths belonging to the selected synsets."""
        ignore = set([
            "n06596364_9591.JPEG",
        ])
        relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
        if "sub_indices" in self.config:
            indices = str_to_indices(self.config["sub_indices"])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
            self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
            files = []
            for rpath in relpaths:
                syn = rpath.split("/")[0]
                if syn in synsets:
                    files.append(rpath)
            return files
        else:
            return relpaths

    def _prepare_synset_to_human(self):
        """Download the synset -> human-readable-name table if missing."""
        SIZE = 2655750  # expected file size in bytes, used as a cheap integrity check
        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
        self.human_dict = os.path.join(self.root, "synset_human.txt")
        if (not os.path.exists(self.human_dict) or
                not os.path.getsize(self.human_dict)==SIZE):
            download(URL, self.human_dict)

    def _prepare_idx_to_synset(self):
        """Download the class-index -> synset YAML mapping if missing."""
        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)

    def _prepare_human_to_integer_label(self):
        """Download and parse the human-label -> integer-class table."""
        URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
        self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
        if (not os.path.exists(self.human2integer)):
            download(URL, self.human2integer)
        with open(self.human2integer, "r") as f:
            lines = f.read().splitlines()
            assert len(lines) == 1000
            self.human2integer_dict = dict()
            for line in lines:
                value, key = line.split(":")
                self.human2integer_dict[key] = int(value)

    def _load(self):
        """Read the prepared file list, resolve labels and build self.data."""
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))

        # relative paths are "<synset>/<filename>.JPEG"
        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]

        unique_synsets = np.unique(self.synsets)
        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
        if not self.keep_orig_class_label:
            # contiguous labels over the synsets actually present
            self.class_labels = [class_dict[s] for s in self.synsets]
        else:
            # original ImageNet class indices (self.synset2idx is set in
            # _filter_relpaths when sub_indices is configured)
            self.class_labels = [self.synset2idx[s] for s in self.synsets]

        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)

        self.human_labels = [human_dict[s] for s in self.synsets]

        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }

        if self.process_images:
            self.size = retrieve(self.config, "size", default=256)
            self.data = ImagePaths(self.abspaths,
                                   labels=labels,
                                   size=self.size,
                                   random_crop=self.random_crop,
                                   )
        else:
            self.data = self.abspaths
+
134
class ImageNetTrain(ImageNetBase):
    """ImageNet ILSVRC2012 training split.

    Downloads the archive via academictorrents on first use (unless the data
    already exists under `data_root`), extracts the main tar and all
    per-synset sub-tars, and writes a sorted filelist.txt.
    """

    NAME = "ILSVRC2012_train"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"  # academictorrents id
    FILES = [
        "ILSVRC2012_img_train.tar",
    ]
    SIZES = [
        147897477120,  # expected archive size in bytes (integrity check)
    ]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.process_images = process_images
        self.data_root = data_root
        super().__init__(**kwargs)

    def _prepare(self):
        """Resolve paths, download/extract if needed, and build filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # default to the XDG cache directory
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)

        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 1281167
        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
                                    default=True)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))

            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path

                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall on a downloaded archive is
                # vulnerable to path traversal if the archive is untrusted
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)

                print("Extracting sub-tars.")
                # the main tar contains one tar per synset
                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:-len(".tar")]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, "r:") as tar:
                        tar.extractall(path=subdir)

            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)

            tdu.mark_prepared(self.root)
195
+
196
+
197
class ImageNetValidation(ImageNetBase):
    """ImageNet ILSVRC2012 validation split.

    Downloads the archive via academictorrents plus a synset assignment file,
    then reorganizes the flat validation images into per-synset folders so
    the layout matches the training split.
    """

    NAME = "ILSVRC2012_validation"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"  # academictorrents id
    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
    FILES = [
        "ILSVRC2012_img_val.tar",
        "validation_synset.txt",
    ]
    SIZES = [
        6744924160,  # expected sizes in bytes (integrity check)
        1950000,
    ]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)

    def _prepare(self):
        """Resolve paths, download/extract/reorganize, and build filelist.txt."""
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            # default to the XDG cache directory
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 50000
        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
                                    default=False)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))

            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path

                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                # NOTE(review): extractall on a downloaded archive is
                # vulnerable to path traversal if the archive is untrusted
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)

                # per-image synset assignments for the flat validation set
                vspath = os.path.join(self.root, self.FILES[1])
                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
                    download(self.VS_URL, vspath)

                with open(vspath, "r") as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict(line.split() for line in synset_dict)

                print("Reorganizing into synset folders")
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for k, v in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)

            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)

            tdu.mark_prepared(self.root)
269
+
270
+
271
+
272
class ImageNetSR(Dataset):
    def __init__(self, size=None,
                 degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
                 random_crop=True):
        """
        Imagenet Superresolution Dataloader
        Performs following ops in order:
        1.  crops a crop of size s from image either as random or center crop
        2.  resizes crop to size with cv2.area_interpolation
        3.  degrades resized crop with degradation_fn

        :param size: resizing to size after cropping
        :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
        :param downscale_f: Low Resolution Downsample factor
        :param min_crop_f: determines crop size s,
          where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
        :param max_crop_f: ""
        :param data_root:
        :param random_crop:
        """
        self.base = self.get_base()
        assert size
        assert (size / downscale_f).is_integer()
        self.size = size
        self.LR_size = int(size / downscale_f)
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert(max_crop_f <= 1.)
        self.center_crop = not random_crop

        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)

        self.pil_interpolation = False # gets reset later if incase interp_op is from pillow

        # choose the LR degradation: the two bsrgan pipelines come from
        # ldm.modules.image_degradation; anything else is a plain resize
        # with the requested cv2/PIL interpolation mode
        if degradation == "bsrgan":
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)

        elif degradation == "bsrgan_light":
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)

        else:
            interpolation_fn = {
                "cv_nearest": cv2.INTER_NEAREST,
                "cv_bilinear": cv2.INTER_LINEAR,
                "cv_bicubic": cv2.INTER_CUBIC,
                "cv_area": cv2.INTER_AREA,
                "cv_lanczos": cv2.INTER_LANCZOS4,
                "pil_nearest": PIL.Image.NEAREST,
                "pil_bilinear": PIL.Image.BILINEAR,
                "pil_bicubic": PIL.Image.BICUBIC,
                "pil_box": PIL.Image.BOX,
                "pil_hamming": PIL.Image.HAMMING,
                "pil_lanczos": PIL.Image.LANCZOS,
            }[degradation]

            self.pil_interpolation = degradation.startswith("pil_")

            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)

            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
                                                                          interpolation=interpolation_fn)

    def __len__(self):
        return len(self.base)

    def __getitem__(self, i):
        """Return a dict with 'image' (HR) and 'LR_image', both float32 in [-1, 1]."""
        example = self.base[i]
        image = Image.open(example["file_path_"])

        if not image.mode == "RGB":
            image = image.convert("RGB")

        image = np.array(image).astype(np.uint8)

        # square crop whose side is a random fraction of the shorter image side
        min_side_len = min(image.shape[:2])
        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
        crop_side_len = int(crop_side_len)

        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)

        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)

        image = self.cropper(image=image)["image"]
        image = self.image_rescaler(image=image)["image"]

        # PIL-based degradations expect a PIL image; albumentations/bsrgan
        # pipelines operate on numpy arrays
        if self.pil_interpolation:
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)

        else:
            LR_image = self.degradation_process(image=image)["image"]

        # scale uint8 [0, 255] -> float32 [-1, 1]
        example["image"] = (image/127.5 - 1.0).astype(np.float32)
        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)

        return example
373
+
374
+
375
class ImageNetSRTrain(ImageNetSR):
    """Super-resolution training split built from pre-selected HR indices."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # path-only base dataset, restricted to the pickled HR index subset
        dset = ImageNetTrain(process_images=False)
        with open("data/imagenet_train_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        return Subset(dset, indices)


class ImageNetSRValidation(ImageNetSR):
    """Super-resolution validation split built from pre-selected HR indices."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        # path-only base dataset, restricted to the pickled HR index subset
        dset = ImageNetValidation(process_images=False)
        with open("data/imagenet_val_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        return Subset(dset, indices)
3DTopia/ldm/data/lsun.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import PIL
4
+ from PIL import Image
5
+ from torch.utils.data import Dataset
6
+ from torchvision import transforms
7
+
8
+
9
class LSUNBase(Dataset):
    """Base LSUN dataset.

    Reads relative image paths from `txt_file`, loads images under
    `data_root`, center-crops each image to a square, optionally resizes to
    `size` x `size`, randomly flips horizontally with probability `flip_p`,
    and returns float32 images scaled to [-1, 1] under the key "image".
    """

    def __init__(self,
                 txt_file,
                 data_root,
                 size=None,
                 interpolation="bicubic",
                 flip_p=0.5
                 ):
        self.data_paths = txt_file
        self.data_root = data_root
        with open(self.data_paths, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, l)
                           for l in self.image_paths],
        }

        self.size = size
        # Fix: PIL.Image.LINEAR was an undocumented alias of BILINEAR and was
        # removed in Pillow 10; map "linear" to the documented constant so
        # construction no longer fails on current Pillow.
        self.interpolation = {"linear": PIL.Image.BILINEAR,
                              "bilinear": PIL.Image.BILINEAR,
                              "bicubic": PIL.Image.BICUBIC,
                              "lanczos": PIL.Image.LANCZOS,
                              }[interpolation]
        self.flip = transforms.RandomHorizontalFlip(p=flip_p)

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = dict((k, self.labels[k][i]) for k in self.labels)
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")

        # default to score-sde preprocessing: center-crop to the shorter side
        img = np.array(image).astype(np.uint8)
        crop = min(img.shape[0], img.shape[1])
        h, w, = img.shape[0], img.shape[1]
        img = img[(h - crop) // 2:(h + crop) // 2,
              (w - crop) // 2:(w + crop) // 2]

        image = Image.fromarray(img)
        if self.size is not None:
            image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip(image)
        image = np.array(image).astype(np.uint8)
        # scale uint8 [0, 255] -> float32 [-1, 1]
        example["image"] = (image / 127.5 - 1.0).astype(np.float32)
        return example
+
61
+
62
# Concrete LSUN splits: each subclass only points LSUNBase at the right
# file list and data root. Validation variants disable flipping by default.

class LSUNChurchesTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs)


class LSUNChurchesValidation(LSUNBase):
    def __init__(self, flip_p=0., **kwargs):
        super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches",
                         flip_p=flip_p, **kwargs)


class LSUNBedroomsTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs)


class LSUNBedroomsValidation(LSUNBase):
    def __init__(self, flip_p=0.0, **kwargs):
        super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms",
                         flip_p=flip_p, **kwargs)


class LSUNCatsTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs)


class LSUNCatsValidation(LSUNBase):
    def __init__(self, flip_p=0., **kwargs):
        super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats",
                         flip_p=flip_p, **kwargs)
3DTopia/ldm/lr_scheduler.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
class LambdaWarmUpCosineScheduler:
    """Linear warm-up followed by a single cosine decay.

    note: use with a base_lr of 1.0
    """

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        """Return the lr multiplier for global step n."""
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            # linear ramp from lr_start up to lr_max
            lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
        else:
            # cosine decay from lr_max to lr_min; clamped at lr_min once
            # max_decay_steps is reached
            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            t = min(t, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + np.cos(t * np.pi))
        self.last_lr = lr
        return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
34
+
35
+
36
class LambdaWarmUpCosineScheduler2:
    """Repeating warm-up + cosine-decay schedule, configurable per cycle via lists.

    note: use with a base_lr of 1.0.
    """

    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        # cumulative cycle boundaries, starting at 0
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        """Return the index of the cycle that contains global step n."""
        for interval, boundary in enumerate(self.cum_cycles[1:]):
            if n <= boundary:
                return interval

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step within the current cycle
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                  f"current cycle {cycle}")
        if n < self.lr_warm_up_steps[cycle]:
            # linear warm-up from f_start to f_max
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
        else:
            # cosine decay from f_max to f_min over the remainder of the cycle
            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (1 + np.cos(t * np.pi))
        self.last_f = f
        return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    """Variant with linear (instead of cosine) decay after warm-up."""

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step within the current cycle
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                  f"current cycle {cycle}")
        if n < self.lr_warm_up_steps[cycle]:
            # linear warm-up from f_start to f_max (same as the cosine variant)
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
        else:
            # linear decay towards f_min across the whole cycle length
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
        self.last_f = f
        return f
+
3DTopia/ldm/models/autoencoder.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pytorch_lightning as pl
3
+ import torch.nn.functional as F
4
+ from contextlib import contextmanager
5
+
6
+ from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
7
+
8
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
9
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
10
+
11
+ from ldm.util import instantiate_from_config
12
+
13
+
14
class VQModel(pl.LightningModule):
    """VQGAN-style autoencoder with a vector-quantized latent space.

    Encodes images through ``Encoder`` -> 1x1 conv -> ``VectorQuantizer``
    codebook, and decodes the quantized latents back. Trained adversarially
    with two optimizers (autoencoder vs. discriminator), dispatched on
    ``optimizer_idx``; the loss module itself comes from ``lossconfig``.
    """

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 batch_resize_range=None,
                 scheduler_config=None,
                 lr_g_factor=1.0,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 use_ema=False
                 ):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap,
                                        sane_index_shape=sane_index_shape)
        # 1x1 convs map between the encoder/decoder channel count and the
        # codebook embedding dimension.
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # Fixed random projection for visualizing >3-channel inputs (see to_rgb).
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if self.batch_resize_range is not None:
            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")

        self.use_ema = use_ema
        if self.use_ema:
            # NOTE(review): LitEma is not imported in this file's visible header;
            # requires `from ldm.modules.ema import LitEma` when use_ema=True.
            self.model_ema = LitEma(self)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap model weights for their EMA shadow copies.

        No-op when ``use_ema`` is False; always restores on exit.
        """
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load a checkpoint's state_dict, dropping keys whose names start
        with any prefix in ``ignore_keys``; non-strict load."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
            print(f"Unexpected Keys: {unexpected}")

    def on_train_batch_end(self, *args, **kwargs):
        # Update the EMA shadow weights after every training batch.
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        """Encode a batch; returns (quantized latents, codebook loss, info tuple)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def encode_to_prequant(self, x):
        """Encode without quantization (continuous pre-codebook latents)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        """Decode quantized latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        quant, diff, (_,_,ind) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return dec, diff, ind
        return dec, diff

    def get_input(self, batch, k):
        """Fetch images from the batch dict; convert BHWC -> BCHW float.

        Optionally resizes the whole batch to a random multiple-of-16 size
        within ``batch_resize_range``.
        """
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if self.batch_resize_range is not None:
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if self.global_step <= 4:
                # do the first few batches with max size to avoid later oom
                new_resize = upper_size
            else:
                # NOTE(review): numpy is not imported in this file's visible
                # header; requires `import numpy as np` when batch_resize_range
                # is set.
                new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
            if new_resize != x.shape[2]:
                x = F.interpolate(x, size=new_resize, mode="bicubic")
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        # https://github.com/pytorch/pytorch/issues/37142
        # try not to fool the heuristics
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train",
                                            predicted_indices=ind)

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            # Second pass with EMA weights; metrics are logged under "_ema".
            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
        """Run both loss heads on a validation batch and log their metrics."""
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
                                        self.global_step,
                                        last_layer=self.get_last_layer(),
                                        split="val"+suffix,
                                        predicted_indices=ind
                                        )

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
                                            self.global_step,
                                            last_layer=self.get_last_layer(),
                                            split="val"+suffix,
                                            predicted_indices=ind
                                            )
        rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
        self.log(f"val{suffix}/rec_loss", rec_loss,
                   prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log(f"val{suffix}/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        # PL >= 1.4 forbids logging the same key twice, so drop the duplicate.
        # NOTE(review): `version` is not imported in this file's visible header;
        # requires `from packaging import version`.
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            del log_dict_ae[f"val{suffix}/rec_loss"]
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers (generator and discriminator), optional
        per-step LambdaLR schedulers from ``scheduler_config``."""
        lr_d = self.learning_rate
        lr_g = self.lr_g_factor*self.learning_rate
        print("lr_d", lr_d)
        print("lr_g", lr_g)
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr_d, betas=(0.5, 0.9))

        if self.scheduler_config is not None:
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            # NOTE(review): LambdaLR is not imported in this file's visible
            # header; requires `from torch.optim.lr_scheduler import LambdaLR`.
            scheduler = [
                {
                    'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
                {
                    'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
            ]
            return [opt_ae, opt_disc], scheduler
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # Used by the loss to balance GAN vs. reconstruction gradients.
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        """Assemble a dict of input / reconstruction images for logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log["inputs"] = x
            return log
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        if plot_ema:
            with self.ema_scope():
                xrec_ema, _ = self(x)
                if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
                log["reconstructions_ema"] = xrec_ema
        return log

    def to_rgb(self, x):
        """Project a multi-channel segmentation map to 3 channels using a
        fixed random 1x1 conv, then rescale to [-1, 1]."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
262
+
263
+
264
class VQModelInterface(VQModel):
    """VQModel whose ``encode`` skips the codebook lookup.

    Latent diffusion operates on the continuous pre-quantization latents;
    quantization is deferred to ``decode`` (and can be bypassed entirely
    with ``force_not_quantize``).
    """

    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(embed_dim=embed_dim, *args, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        # Continuous latents only -- no quantization here.
        features = self.encoder(x)
        return self.quant_conv(features)

    def decode(self, h, force_not_quantize=False):
        # also go through quantization layer
        if force_not_quantize:
            quant = h
        else:
            quant, _, _ = self.quantize(h)
        return self.decoder(self.post_quant_conv(quant))
283
+
284
+
285
class AutoencoderKL(pl.LightningModule):
    """Continuous KL-regularized autoencoder (the VAE of latent diffusion).

    The encoder produces the moments of a diagonal Gaussian posterior
    (hence the ``double_z`` requirement); a sample (or the mode) of that
    posterior is decoded back to image space. Trained adversarially with
    two optimizers (autoencoder vs. discriminator) via the configured loss.
    """

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        # Encoder must emit 2*z_channels (mean and logvar of the posterior).
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # Fixed random projection for visualizing >3-channel inputs (see to_rgb).
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Non-strict state_dict load, dropping keys matching ``ignore_keys`` prefixes."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def encode(self, x):
        """Encode a batch into a DiagonalGaussianDistribution posterior."""
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        """Decode a latent sample back to image space."""
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        """Encode then decode; returns (reconstruction, posterior).

        ``sample_posterior=True`` draws a stochastic sample, otherwise the
        posterior mode (mean) is used.
        """
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        """Fetch images from the batch dict; convert BHWC -> BCHW float."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        # Evaluate both loss heads (indices 0 and 1) on the same batch.
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")

        self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers: autoencoder params and discriminator params."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # Used by the loss to balance GAN vs. reconstruction gradients.
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, **kwargs):
        """Return a dict of inputs, reconstructions, and prior samples for logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            # Decode pure Gaussian noise of the posterior's shape as "samples".
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        """Project a multi-channel segmentation map to 3 channels using a
        fixed random 1x1 conv, then rescale to [-1, 1]."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
424
+
425
+
426
class IdentityFirstStage(torch.nn.Module):
    """No-op first stage: passes data through unchanged.

    Used when diffusion should operate directly on the input space with no
    autoencoder; optionally mimics the VQ interface's quantize() signature.
    """

    def __init__(self, *args, vq_interface=False, **kwargs):
        # Fix: initialize nn.Module *before* assigning attributes, so the
        # assignment goes through a fully set-up module (the original set
        # self.vq_interface first, relying on fragile __setattr__ behavior).
        super().__init__()
        self.vq_interface = vq_interface  # TODO: Should be true by default but check to not break older stuff

    def encode(self, x, *args, **kwargs):
        """Identity encode: return the input unchanged."""
        return x

    def decode(self, x, *args, **kwargs):
        """Identity decode: return the input unchanged."""
        return x

    def quantize(self, x, *args, **kwargs):
        # Mimic VectorQuantizer's (quant, loss, info) return when callers
        # expect the VQ interface; plain pass-through otherwise.
        if self.vq_interface:
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        return x
3DTopia/ldm/models/diffusion/__init__.py ADDED
File without changes
3DTopia/ldm/models/diffusion/classifier.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import pytorch_lightning as pl
4
+ from omegaconf import OmegaConf
5
+ from torch.nn import functional as F
6
+ from torch.optim import AdamW
7
+ from torch.optim.lr_scheduler import LambdaLR
8
+ from copy import deepcopy
9
+ from einops import rearrange
10
+ from glob import glob
11
+ from natsort import natsorted
12
+
13
+ from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
14
+ from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
15
+
16
# Maps the conditioning/label type to the classifier backbone used for it.
__models__ = {
    'class_label': EncoderUNetModel,
    'segmentation': UNetModel
}
20
+
21
+
22
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that ignores mode changes.

    Assigned over a frozen model's ``train`` method so later calls to
    ``.train()`` / ``.eval()`` leave the module's mode untouched.
    """
    return self
26
+
27
+
28
class NoisyLatentImageClassifier(pl.LightningModule):
    """Classifier trained on noised diffusion latents (classifier guidance).

    Loads a frozen diffusion model from a training-run directory, noises its
    first-stage latents with ``q_sample`` at random timesteps, and trains a
    UNet-based classifier (chosen via ``__models__``/``label_key``) to
    predict labels from those noisy latents.
    """

    def __init__(self,
                 diffusion_path,
                 num_classes,
                 ckpt_path=None,
                 pool='attention',
                 label_key=None,
                 diffusion_ckpt_path=None,
                 scheduler_config=None,
                 weight_decay=1.e-2,
                 log_steps=10,
                 monitor='val/loss',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.num_classes = num_classes
        # get latest config of diffusion model
        diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
        self.diffusion_config = OmegaConf.load(diffusion_config).model
        self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
        self.load_diffusion()

        self.monitor = monitor
        # Number of downsampling stages of the first-stage encoder (used to
        # shrink segmentation targets to latent resolution).
        self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
        self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
        self.log_steps = log_steps

        # Prefer the diffusion model's own conditioning key when it has one.
        self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
            else self.diffusion_model.cond_stage_key

        assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'

        if self.label_key not in __models__:
            raise NotImplementedError()

        self.load_classifier(ckpt_path, pool)

        self.scheduler_config = scheduler_config
        self.use_scheduler = self.scheduler_config is not None
        self.weight_decay = weight_decay

    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        """Non-strict checkpoint load into self (or only the classifier when
        ``only_model`` is set), dropping keys matching ``ignore_keys`` prefixes."""
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    def load_diffusion(self):
        """Instantiate the diffusion model from config and freeze it
        (eval mode pinned via disabled_train, grads disabled)."""
        model = instantiate_from_config(self.diffusion_config)
        self.diffusion_model = model.eval()
        self.diffusion_model.train = disabled_train
        for param in self.diffusion_model.parameters():
            param.requires_grad = False

    def load_classifier(self, ckpt_path, pool):
        """Build the classifier UNet from the diffusion UNet's config,
        swapping in/out channels for latent input / class logits."""
        model_config = deepcopy(self.diffusion_config.params.unet_config.params)
        model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
        model_config.out_channels = self.num_classes
        if self.label_key == 'class_label':
            model_config.pool = pool

        self.model = __models__[self.label_key](**model_config)
        if ckpt_path is not None:
            print('#####################################################################')
            print(f'load from ckpt "{ckpt_path}"')
            print('#####################################################################')
            self.init_from_ckpt(ckpt_path)

    @torch.no_grad()
    def get_x_noisy(self, x, t, noise=None):
        """Noise latents x to timestep t via the diffusion forward process."""
        noise = default(noise, lambda: torch.randn_like(x))
        continuous_sqrt_alpha_cumprod = None
        if self.diffusion_model.use_continuous_noise:
            continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
            # todo: make sure t+1 is correct here

        return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
                                             continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)

    def forward(self, x_noisy, t, *args, **kwargs):
        """Classifier logits for noisy latents at timestep t."""
        return self.model(x_noisy, t)

    @torch.no_grad()
    def get_input(self, batch, k):
        """Fetch images from the batch dict; convert BHWC -> BCHW float."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    @torch.no_grad()
    def get_conditioning(self, batch, k=None):
        """Fetch classification targets; segmentation maps are downsampled
        to the latent resolution (one factor-2 step per encoder stage)."""
        if k is None:
            k = self.label_key
        assert k is not None, 'Needs to provide label key'

        targets = batch[k].to(self.device)

        if self.label_key == 'segmentation':
            targets = rearrange(targets, 'b h w c -> b c h w')
            for down in range(self.numd):
                h, w = targets.shape[-2:]
                targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')

            # targets = rearrange(targets,'b c h w -> b h w c')

        return targets

    def compute_top_k(self, logits, labels, k, reduction="mean"):
        """Top-k accuracy; mean scalar or per-sample tensor depending on reduction."""
        _, top_ks = torch.topk(logits, k, dim=1)
        if reduction == "mean":
            return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
        elif reduction == "none":
            return (top_ks == labels[:, None]).float().sum(dim=-1)

    def on_train_epoch_start(self):
        # save some memory
        self.diffusion_model.model.to('cpu')

    @torch.no_grad()
    def write_logs(self, loss, logits, targets):
        """Log loss, top-1/top-5 accuracy and current LR for the active split."""
        log_prefix = 'train' if self.training else 'val'
        log = {}
        log[f"{log_prefix}/loss"] = loss.mean()
        log[f"{log_prefix}/acc@1"] = self.compute_top_k(
            logits, targets, k=1, reduction="mean"
        )
        log[f"{log_prefix}/acc@5"] = self.compute_top_k(
            logits, targets, k=5, reduction="mean"
        )

        self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
        self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
        self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
        lr = self.optimizers().param_groups[0]['lr']
        self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)

    def shared_step(self, batch, t=None):
        """Common train/val step: noise latents (random t unless given),
        classify, log metrics; returns (loss, logits, x_noisy, targets)."""
        x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
        targets = self.get_conditioning(batch)
        if targets.dim() == 4:
            # One-hot / channel-coded targets -> class indices.
            targets = targets.argmax(dim=1)
        if t is None:
            t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
        else:
            t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
        x_noisy = self.get_x_noisy(x, t)
        logits = self(x_noisy, t)

        loss = F.cross_entropy(logits, targets, reduction='none')

        self.write_logs(loss.detach(), logits.detach(), targets.detach())

        loss = loss.mean()
        return loss, logits, x_noisy, targets

    def training_step(self, batch, batch_idx):
        loss, *_ = self.shared_step(batch)
        return loss

    def reset_noise_accs(self):
        # Accuracy accumulators keyed by noise timestep, sampled every log_every_t.
        self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
                          range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}

    def on_validation_start(self):
        self.reset_noise_accs()

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        loss, *_ = self.shared_step(batch)

        # Additionally evaluate accuracy at each fixed noise level.
        for t in self.noisy_acc:
            _, logits, _, targets = self.shared_step(batch, t)
            self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
            self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))

        return loss

    def configure_optimizers(self):
        """AdamW over classifier params, with optional per-step LambdaLR."""
        optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)

        if self.use_scheduler:
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [optimizer], scheduler

        return optimizer

    @torch.no_grad()
    def log_images(self, batch, N=8, *args, **kwargs):
        """Return a dict of inputs, labels and per-timestep noisy inputs with
        the classifier's predictions, truncated to the first N samples."""
        log = dict()
        x = self.get_input(batch, self.diffusion_model.first_stage_key)
        log['inputs'] = x

        y = self.get_conditioning(batch)

        if self.label_key == 'class_label':
            # Render human-readable labels as an image.
            y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
            log['labels'] = y

        if ismap(y):
            log['labels'] = self.diffusion_model.to_rgb(y)

            for step in range(self.log_steps):
                current_time = step * self.log_time_interval

                _, logits, x_noisy, _ = self.shared_step(batch, t=current_time)

                log[f'inputs@t{current_time}'] = x_noisy

                pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
                pred = rearrange(pred, 'b h w c -> b c h w')

                log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)

        for key in log:
            log[key] = log[key][:N]

        return log
3DTopia/ldm/models/diffusion/ddim.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ from functools import partial
7
+
8
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \
9
+ extract_into_tensor
10
+
11
+
12
+ class DDIMSampler(object):
13
+ def __init__(self, model, schedule="linear", **kwargs):
14
+ super().__init__()
15
+ self.model = model
16
+ self.ddpm_num_timesteps = model.num_timesteps
17
+ self.schedule = schedule
18
+
19
+ def register_buffer(self, name, attr):
20
+ if type(attr) == torch.Tensor:
21
+ if attr.device != torch.device("cuda"):
22
+ attr = attr.to(torch.device("cuda"))
23
+ setattr(self, name, attr)
24
+
25
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
26
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
27
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
28
+ alphas_cumprod = self.model.alphas_cumprod
29
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
30
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
31
+
32
+ self.register_buffer('betas', to_torch(self.model.betas))
33
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
34
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
35
+
36
+ # calculations for diffusion q(x_t | x_{t-1}) and others
37
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
38
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
39
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
40
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
41
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
42
+
43
+ # ddim sampling parameters
44
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
45
+ ddim_timesteps=self.ddim_timesteps,
46
+ eta=ddim_eta,verbose=verbose)
47
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
48
+ self.register_buffer('ddim_alphas', ddim_alphas)
49
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
50
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
51
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
52
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
53
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
54
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
55
+
56
+ @torch.no_grad()
57
+ def sample(self,
58
+ S,
59
+ batch_size,
60
+ shape,
61
+ conditioning=None,
62
+ callback=None,
63
+ normals_sequence=None,
64
+ img_callback=None,
65
+ quantize_x0=False,
66
+ eta=0.,
67
+ mask=None,
68
+ x0=None,
69
+ temperature=1.,
70
+ noise_dropout=0.,
71
+ score_corrector=None,
72
+ corrector_kwargs=None,
73
+ verbose=True,
74
+ x_T=None,
75
+ log_every_t=100,
76
+ unconditional_guidance_scale=1.,
77
+ unconditional_conditioning=None,
78
+ # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
79
+ **kwargs
80
+ ):
81
+ if conditioning is not None:
82
+ if isinstance(conditioning, dict):
83
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
84
+ if cbs != batch_size:
85
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
86
+ else:
87
+ if conditioning.shape[0] != batch_size:
88
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
89
+
90
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
91
+ # sampling
92
+ C, H, W = shape
93
+ size = (batch_size, C, H, W)
94
+ print(f'Data shape for DDIM sampling is {size}, eta {eta}')
95
+
96
+ samples, intermediates = self.ddim_sampling(conditioning, size,
97
+ callback=callback,
98
+ img_callback=img_callback,
99
+ quantize_denoised=quantize_x0,
100
+ mask=mask, x0=x0,
101
+ ddim_use_original_steps=False,
102
+ noise_dropout=noise_dropout,
103
+ temperature=temperature,
104
+ score_corrector=score_corrector,
105
+ corrector_kwargs=corrector_kwargs,
106
+ x_T=x_T,
107
+ log_every_t=log_every_t,
108
+ unconditional_guidance_scale=unconditional_guidance_scale,
109
+ unconditional_conditioning=unconditional_conditioning,
110
+ )
111
+ return samples, intermediates
112
+
113
+ @torch.no_grad()
114
+ def ddim_sampling(self, cond, shape,
115
+ x_T=None, ddim_use_original_steps=False,
116
+ callback=None, timesteps=None, quantize_denoised=False,
117
+ mask=None, x0=None, img_callback=None, log_every_t=100,
118
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
119
+ unconditional_guidance_scale=1., unconditional_conditioning=None,):
120
+ device = self.model.betas.device
121
+ b = shape[0]
122
+ if x_T is None:
123
+ img = torch.randn(shape, device=device)
124
+ else:
125
+ img = x_T
126
+
127
+ if timesteps is None:
128
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
129
+ elif timesteps is not None and not ddim_use_original_steps:
130
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
131
+ timesteps = self.ddim_timesteps[:subset_end]
132
+
133
+ intermediates = {'x_inter': [img], 'pred_x0': [img]}
134
+ time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
135
+ total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
136
+ print(f"Running DDIM Sampling with {total_steps} timesteps")
137
+
138
+ iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
139
+
140
+ for i, step in enumerate(iterator):
141
+ index = total_steps - i - 1
142
+ ts = torch.full((b,), step, device=device, dtype=torch.long)
143
+
144
+ if mask is not None:
145
+ assert x0 is not None
146
+ img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
147
+ img = img_orig * mask + (1. - mask) * img
148
+
149
+ outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
150
+ quantize_denoised=quantize_denoised, temperature=temperature,
151
+ noise_dropout=noise_dropout, score_corrector=score_corrector,
152
+ corrector_kwargs=corrector_kwargs,
153
+ unconditional_guidance_scale=unconditional_guidance_scale,
154
+ unconditional_conditioning=unconditional_conditioning)
155
+ img, pred_x0 = outs
156
+ if callback: callback(i)
157
+ if img_callback: img_callback(pred_x0, i)
158
+
159
+ if index % log_every_t == 0 or index == total_steps - 1:
160
+ intermediates['x_inter'].append(img)
161
+ intermediates['pred_x0'].append(pred_x0)
162
+
163
+ return img, intermediates
164
+
165
    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None):
        """Perform a single DDIM denoising step: map x_t to x_{t-1}.

        Args:
            x: current noisy latent x_t (batched; indexing below assumes 4-D,
               i.e. (b, c, h, w) — TODO confirm for triplane latents).
            c: conditioning passed through to the diffusion model.
            t: per-sample timestep tensor of shape (b,).
            index: position of this step within the (possibly shortened) DDIM
                   schedule; used to index the precomputed alpha/sigma tables.
            repeat_noise: if True, share one noise sample across the batch.
            use_original_steps: use the model's full DDPM tables instead of the
                                DDIM-subsampled ones.
            quantize_denoised: pass the x0 prediction through the first stage's
                               quantizer (VQ models).
            temperature: scales the stochastic noise term.
            noise_dropout: optional dropout applied to the noise term.
            score_corrector: optional object that modifies the predicted score
                             (only valid for eps-parameterization).
            unconditional_guidance_scale / unconditional_conditioning:
                classifier-free guidance scale and its "empty" conditioning.

        Returns:
            (x_prev, pred_x0): the next latent and the current x0 estimate.
        """
        b, *_, device = *x.shape, x.device

        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
            # no classifier-free guidance: single forward pass
            e_t = self.model.apply_model(x, t, c)
        else:
            # classifier-free guidance: batch the unconditional and conditional
            # passes together, then extrapolate from the unconditional output
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t] * 2)
            c_in = torch.cat([unconditional_conditioning, c])
            e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

        if score_corrector is not None:
            assert self.model.parameterization == "eps"
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        # pick the schedule tables: full DDPM tables vs. DDIM-subsampled tables
        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)

        # current prediction for x_0 (DDIM eq. 12, assumes eps-prediction)
        pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0
205
+
206
+ @torch.no_grad()
207
+ def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
208
+ # fast, but does not allow for exact reconstruction
209
+ # t serves as an index to gather the correct alphas
210
+ if use_original_steps:
211
+ sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
212
+ sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
213
+ else:
214
+ sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
215
+ sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
216
+
217
+ if noise is None:
218
+ noise = torch.randn_like(x0)
219
+ return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
220
+ extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
221
+
222
    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
               use_original_steps=False):
        """Denoise a (partially noised) latent back to t=0 with DDIM steps.

        Runs ``t_start`` reverse steps of :meth:`p_sample_ddim`, walking the
        DDIM schedule from step ``t_start - 1`` down to 0. Typically used after
        :meth:`stochastic_encode` for img2img-style editing.
        """
        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
        timesteps = timesteps[:t_start]

        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for i, step in enumerate(iterator):
            # index walks the schedule tables backwards as step decreases
            index = total_steps - i - 1
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                          unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning)
        return x_dec
3DTopia/ldm/models/diffusion/ddpm.py ADDED
@@ -0,0 +1,1746 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ wild mixture of
3
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ import os
10
+ import wandb
11
+ import torch
12
+ import imageio
13
+ import torch.nn as nn
14
+ import numpy as np
15
+ import pytorch_lightning as pl
16
+ from torch.optim.lr_scheduler import LambdaLR
17
+ from einops import rearrange, repeat
18
+ from contextlib import contextmanager
19
+ from functools import partial
20
+ from tqdm import tqdm
21
+ from torchvision.utils import make_grid
22
+ from pytorch_lightning.utilities.rank_zero import rank_zero_only
23
+
24
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
25
+ from ldm.modules.ema import LitEma
26
+ from module.model_2d import DiagonalGaussianDistribution
27
+ from ldm.modules.distributions.distributions import normal_kl
28
+ from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
29
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
30
+ from ldm.models.diffusion.ddim import DDIMSampler
31
+ from utility.triplane_renderer.renderer import to8b
32
+
33
+
34
# Maps a conditioning mode to the keyword-argument name the DiffusionWrapper
# expects for that mode ('concat' -> channel-concat, 'crossattn' -> cross
# attention context, 'adm' -> class/label embedding).
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
37
+
38
+
39
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train``.

    Assigned onto frozen sub-models so that later ``.train()`` / ``.eval()``
    calls cannot flip their train/eval mode; simply returns the module.
    """
    return self
43
+
44
+
45
def uniform_on_device(r1, r2, shape, device):
    """Sample a tensor of ``shape`` uniformly from the interval between r2 and r1.

    Values are drawn as ``r2 + (r1 - r2) * U[0, 1)`` on the given device.
    """
    u = torch.rand(*shape, device=device)
    return (r1 - r2) * u + r2
47
+
48
+
49
+ class DDPM(pl.LightningModule):
50
+ # classic DDPM with Gaussian diffusion, in image space
51
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=[],  # NOTE(review): mutable default — only iterated here, but consider None
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 learning_rate=1e-4,
                 shift_scale=None,
                 ):
        """Classic DDPM (Gaussian diffusion in image space) as a LightningModule.

        Builds the UNet via ``DiffusionWrapper``, optionally its EMA copy,
        loads a checkpoint if given, and registers the noise schedule buffers.
        """
        super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.beta_schedule = beta_schedule
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        # NOTE: checkpoint is loaded BEFORE the schedule buffers are registered
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)

        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, shift_scale=shift_scale)

        self.loss_type = loss_type

        # per-timestep log-variance for the NLL term; trainable if learn_logvar
        self.learn_logvar = learn_logvar
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

        self.learning_rate = learning_rate
125
+
126
+
127
    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, shift_scale=None):
        """Precompute and register all noise-schedule buffers.

        Derives betas (or uses ``given_betas``), the cumulative alpha products,
        the posterior q(x_{t-1} | x_t, x_0) coefficients, and the per-timestep
        VLB weights for the chosen parameterization.
        """
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s, shift_scale=shift_scale)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # a_bar_{t-1}, with a_bar_{-1} := 1
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        print("Using timesteps of {}".format(self.num_timesteps))
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0); v_posterior blends
        # between beta_tilde (v=0) and beta (v=1)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        # per-timestep weights for the variational lower bound term
        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        elif self.parameterization == "v":
            # uniform weights for v-prediction (ones_like keeps shape/dtype)
            lvlb_weights = torch.ones_like(self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        # weight at t=0 is degenerate (posterior variance 0) — copy from t=1
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()
185
+
186
    @contextmanager
    def ema_scope(self, context=None):
        """Context manager that temporarily swaps in the EMA weights.

        On entry, stores the live weights and copies the EMA shadow weights
        into the model; on exit, restores the live weights. No-op when
        ``use_ema`` is False. ``context`` is only used to label the prints.
        """
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")
200
+
201
    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        """Load weights from a checkpoint file, non-strictly.

        Drops any state-dict keys whose name starts with an entry of
        ``ignore_keys``. With ``only_model=True`` only the inner diffusion
        model is loaded, not the whole LightningModule.
        """
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")
218
+
219
    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance
230
+
231
    def predict_start_from_noise(self, x_t, t, noise):
        """Recover the x_0 estimate from x_t and predicted noise (eps-param)."""
        return (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
            extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )
236
+
237
    def predict_start_from_z_and_v(self, x_t, t, v):
        """Recover the x_0 estimate from x_t and a v-prediction:
        x_0 = sqrt(a_bar_t) * x_t - sqrt(1 - a_bar_t) * v."""
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
            extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
        )
244
+
245
    def predict_eps_from_z_and_v(self, x_t, t, v):
        """Recover the noise estimate eps from x_t and a v-prediction:
        eps = sqrt(a_bar_t) * v + sqrt(1 - a_bar_t) * x_t."""
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
            extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
        )
250
+
251
    def q_posterior(self, x_start, x_t, t):
        """Compute mean/variance of the posterior q(x_{t-1} | x_t, x_0)
        from the precomputed schedule buffers."""
        posterior_mean = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
259
+
260
    def p_mean_variance(self, x, t, clip_denoised: bool):
        """Compute the reverse-process distribution p(x_{t-1} | x_t).

        Runs the model once, converts its output to an x_0 estimate according
        to the parameterization, optionally clamps it to [-1, 1], and feeds it
        into the closed-form posterior.
        """
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        elif self.parameterization == "v":
            x_recon = self.predict_start_from_z_and_v(x, t, model_out)
        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance
273
+
274
    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        """Draw one ancestral sample x_{t-1} ~ p(x_{t-1} | x_t)."""
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
282
+
283
    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        """Run the full reverse diffusion chain from pure noise.

        Starts from N(0, I) of the given ``shape`` and applies
        :meth:`p_sample` for all ``num_timesteps`` steps. Intermediate images
        are recorded every ``log_every_t`` steps when requested.
        """
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img
297
+
298
+ @torch.no_grad()
299
+ def sample(self, batch_size=16, return_intermediates=False):
300
+ image_size = self.image_size
301
+ channels = self.channels
302
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
303
+ return_intermediates=return_intermediates)
304
+
305
    def q_sample(self, x_start, t, noise=None):
        """Sample x_t ~ q(x_t | x_0) in closed form; draws noise if not given."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
309
+
310
    def get_v(self, x, noise, t):
        """Compute the v-prediction target:
        v = sqrt(a_bar_t) * eps - sqrt(1 - a_bar_t) * x_0."""
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
            extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
        )
315
+
316
+ def get_loss(self, pred, target, mean=True):
317
+ if self.loss_type == 'l1':
318
+ loss = (target - pred).abs()
319
+ if mean:
320
+ loss = loss.mean()
321
+ elif self.loss_type == 'l2':
322
+ if mean:
323
+ loss = torch.nn.functional.mse_loss(target, pred)
324
+ else:
325
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
326
+ else:
327
+ raise NotImplementedError("unknown loss type '{loss_type}'")
328
+
329
+ return loss
330
+
331
    def p_losses(self, x_start, t, noise=None):
        """Compute the training loss for a batch of clean inputs at timesteps t.

        Noises ``x_start`` to x_t, predicts with the model, and compares
        against the parameterization-appropriate target (noise / x_0 / v).
        Returns (loss, loss_dict) where loss_dict carries the simple, VLB, and
        combined terms under a 'train/' or 'val/' prefix.
        """
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            # NOTE(review): "Paramterization" typo lives in the runtime message
            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")

        # per-sample loss: mean over all non-batch dims (assumes 4-D input — TODO confirm)
        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        # combined objective: simple term plus (usually zero-weighted) ELBO term
        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict
361
+
362
+ def forward(self, x, *args, **kwargs):
363
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
364
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
365
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
366
+ return self.p_losses(x, t, *args, **kwargs)
367
+
368
+ def get_input(self, batch, k):
369
+ x = batch[k]
370
+ if isinstance(x, list):
371
+ return x
372
+ if len(x.shape) == 3:
373
+ x = x[..., None]
374
+ # x = rearrange(x, 'b h w c -> b c h w')
375
+ x = x.to(memory_format=torch.contiguous_format).float()
376
+ return x
377
+
378
+ def shared_step(self, batch):
379
+ x = self.get_input(batch, self.first_stage_key)
380
+ loss, loss_dict = self(x)
381
+ return loss, loss_dict
382
+
383
    def training_step(self, batch, batch_idx):
        """Lightning training step: compute loss and log metrics."""
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=False,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=False, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            # surface the current (scheduled) learning rate
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=False, logger=True, on_step=True, on_epoch=False)

        return loss
397
+
398
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Lightning validation step: log losses with both the live weights
        and (suffixed '_ema') the EMA weights."""
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
406
+
407
+ def on_train_batch_end(self, *args, **kwargs):
408
+ if self.use_ema:
409
+ self.model_ema(self.model)
410
+
411
    def _get_rows_from_list(self, samples):
        """Arrange a list of image batches into a single grid image
        (one row per batch element, one column per list entry)."""
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid
417
+
418
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        """Build a dict of visualization images for logging.

        Includes the inputs, a forward-diffusion row (progressively noised
        inputs), and — when ``sample`` is True — EMA samples plus the denoise
        row. ``return_keys`` optionally filters the returned dict.
        """
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row (sampled under EMA weights)
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            # only filter when at least one requested key is present
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
455
+
456
+ def configure_optimizers(self):
457
+ lr = self.learning_rate
458
+ params = list(self.model.parameters())
459
+ if self.learn_logvar:
460
+ params = params + [self.logvar]
461
+ opt = torch.optim.AdamW(params, lr=lr)
462
+ return opt
463
+
464
+
465
+ class LatentDiffusion(DDPM):
466
+ """main class"""
467
+ def __init__(self,
468
+ first_stage_config,
469
+ cond_stage_config,
470
+ num_timesteps_cond=None,
471
+ cond_stage_key="image",
472
+ cond_stage_trainable=False,
473
+ concat_mode=True,
474
+ cond_stage_forward=None,
475
+ conditioning_key=None,
476
+ scale_factor=1.0,
477
+ scale_shift=0.0,
478
+ scale_by_std=False,
479
+ use_3daware=False,
480
+ *args, **kwargs):
481
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
482
+ self.scale_by_std = scale_by_std
483
+ assert self.num_timesteps_cond <= kwargs['timesteps']
484
+ # for backwards compatibility after implementation of DiffusionWrapper
485
+ if conditioning_key is None:
486
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
487
+ if cond_stage_config == '__is_unconditional__':
488
+ conditioning_key = None
489
+ ckpt_path = kwargs.pop("ckpt_path", None)
490
+ ignore_keys = kwargs.pop("ignore_keys", [])
491
+ super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
492
+ self.concat_mode = concat_mode
493
+ self.cond_stage_trainable = cond_stage_trainable
494
+ self.cond_stage_key = cond_stage_key
495
+ try:
496
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
497
+ except:
498
+ self.num_downs = 0
499
+ if not scale_by_std:
500
+ self.scale_factor = scale_factor
501
+ self.scale_shift = scale_shift
502
+ else:
503
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
504
+ self.instantiate_first_stage(first_stage_config)
505
+ self.instantiate_cond_stage(cond_stage_config)
506
+ self.cond_stage_forward = cond_stage_forward
507
+ self.clip_denoised = False
508
+ self.bbox_tokenizer = None
509
+
510
+ self.restarted_from_ckpt = False
511
+ if ckpt_path is not None:
512
+ self.init_from_ckpt(ckpt_path, ignore_keys)
513
+ self.restarted_from_ckpt = True
514
+
515
+ self.use_3daware = use_3daware
516
+
517
+ self.is_test = False
518
+
519
+ self.test_mode = None
520
+ self.test_tag = ""
521
+
522
    def make_cond_schedule(self, ):
        """Build the mapping from diffusion timesteps to (fewer) conditioning
        timesteps: the first ``num_timesteps_cond`` slots get evenly spaced
        ids, all later slots are pinned to the last timestep."""
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids
526
+
527
    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx):
        """On the very first batch of a fresh run (and only with
        ``scale_by_std``), set ``scale_factor`` to 1/std of the first-stage
        encodings so latents are approximately unit-variance."""
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")
543
+
544
    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, shift_scale=None):
        """Register the DDPM schedule, then set up the shortened conditioning
        schedule when ``num_timesteps_cond > 1``."""
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s, shift_scale)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()
552
+
553
+ def instantiate_first_stage(self, config):
554
+ model = instantiate_from_config(config)
555
+ self.first_stage_model = model.eval()
556
+ self.first_stage_model.train = disabled_train
557
+ for param in self.first_stage_model.parameters():
558
+ param.requires_grad = False
559
+
560
    def instantiate_cond_stage(self, config):
        """Instantiate the conditioning model.

        Supports three non-trainable modes: reuse the first stage
        ('__is_first_stage__'), no conditioning ('__is_unconditional__'), or a
        frozen model from config. With ``cond_stage_trainable`` the model is
        built normally and left trainable.
        """
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                # freeze: eval mode, no-op .train(), gradients off
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model
580
+
581
    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        """Decode a list of latent batches through the first stage and arrange
        the decoded images into a single grid (one column per list entry)."""
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid
592
+
593
def get_first_stage_encoding(self, encoder_posterior):
    """Turn the first-stage encoder output into a scaled, shifted latent."""
    if isinstance(encoder_posterior, DiagonalGaussianDistribution):
        # deterministic: take the distribution mode rather than sampling
        latent = encoder_posterior.mode()
        # z = encoder_posterior.sample()
    elif isinstance(encoder_posterior, torch.Tensor):
        latent = encoder_posterior
    else:
        raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
    return self.scale_factor * (latent + self.scale_shift)
602
+
603
def get_learned_conditioning(self, c):
    """Encode conditioning input with the cond-stage model.

    Uses ``cond_stage_forward`` when configured, otherwise the model's
    ``encode`` method (mode of a Gaussian posterior) or a plain call.
    """
    if self.cond_stage_forward is not None:
        assert hasattr(self.cond_stage_model, self.cond_stage_forward)
        return getattr(self.cond_stage_model, self.cond_stage_forward)(c)

    encode = getattr(self.cond_stage_model, 'encode', None)
    if callable(encode):
        out = encode(c)
        if isinstance(out, DiagonalGaussianDistribution):
            out = out.mode()
        return out
    return self.cond_stage_model(c).float()
615
+
616
def meshgrid(self, h, w):
    """Return an (h, w, 2) integer grid of (row, col) coordinates."""
    rows = torch.arange(h).view(h, 1, 1).expand(h, w, 1)
    cols = torch.arange(w).view(1, w, 1).expand(h, w, 1)
    return torch.cat([rows, cols], dim=-1)
622
+
623
def delta_border(self, h, w):
    """Normalized distance to the image border: 0 at the border, 0.5 at the center.

    :param h: height
    :param w: width
    :return: (h, w) tensor of normalized border distances
    """
    corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
    grid = self.meshgrid(h, w) / corner  # coordinates normalized to [0, 1]
    dist_tl = torch.min(grid, dim=-1, keepdims=True)[0]      # distance to top/left edges
    dist_br = torch.min(1 - grid, dim=-1, keepdims=True)[0]  # distance to bottom/right edges
    return torch.min(torch.cat([dist_tl, dist_br], dim=-1), dim=-1)[0]
636
+
637
def get_weighting(self, h, w, Ly, Lx, device):
    """Per-pixel blending weights (border-faded) for stitching Ly*Lx patches."""
    w_map = torch.clip(self.delta_border(h, w),
                       self.split_input_params["clip_min_weight"],
                       self.split_input_params["clip_max_weight"], )
    w_map = w_map.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

    if self.split_input_params["tie_braker"]:
        # break ties between overlapping patches by also fading across the patch grid
        tie = torch.clip(self.delta_border(Ly, Lx),
                         self.split_input_params["clip_min_tie_weight"],
                         self.split_input_params["clip_max_tie_weight"])
        w_map = w_map * tie.view(1, 1, Ly * Lx).to(device)
    return w_map
652
+
653
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
    """
    Build fold/unfold operators plus normalization and blending weights for
    patch-wise processing of an image tensor.

    :param x: img of size (bs, c, h, w)
    :param kernel_size: (kh, kw) patch size on the input grid
    :param stride: (sh, sw) patch stride on the input grid
    :param uf: output upscale factor (> 1 for decode-style upsampling)
    :param df: output downscale factor (> 1 for encode-style downsampling)
    :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
    """
    bs, nc, h, w = x.shape

    # number of crops in image
    Ly = (h - kernel_size[0]) // stride[0] + 1
    Lx = (w - kernel_size[1]) // stride[1] + 1

    if uf == 1 and df == 1:
        # same-resolution case: fold directly inverts unfold
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

        weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

    elif uf > 1 and df == 1:
        # upscaled output: unfold on the input grid, fold on the uf-times-larger grid
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        # NOTE(review): the second kernel entry reuses kernel_size[0] * uf; for
        # non-square kernels this presumably should be kernel_size[1] * uf — confirm.
        fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                            dilation=1, padding=0,
                            stride=(stride[0] * uf, stride[1] * uf))
        fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

        weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

    elif df > 1 and uf == 1:
        # downscaled output: unfold on the input grid, fold on the df-times-smaller grid
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        # NOTE(review): same non-square-kernel concern as above (kernel_size[0] // df twice).
        fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                            dilation=1, padding=0,
                            stride=(stride[0] // df, stride[1] // df))
        fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

        weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

    else:
        raise NotImplementedError

    return fold, unfold, normalization, weighting
704
+
705
@torch.no_grad()
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
              cond_key=None, return_original_cond=False, bs=None):
    """Fetch data from the batch, encode to latents, and assemble conditioning.

    :param batch: raw data batch
    :param k: key of the first-stage input in the batch
    :param return_first_stage_outputs: also return the raw input and its reconstruction
    :param force_c_encode: encode conditioning now even if the cond stage is trainable
    :param cond_key: batch key for the conditioning (defaults to self.cond_stage_key)
    :param return_original_cond: also return the un-encoded conditioning
    :param bs: optional batch-size cap
    :return: list [z, c] (+ [x, xrec] and/or [xc] per the flags)
    """
    x = super().get_input(batch, k)
    if bs is not None:
        x = x[:bs]
    x = x.to(self.device)
    encoder_posterior = self.encode_first_stage(x)
    z = self.get_first_stage_encoding(encoder_posterior).detach()

    if self.model.conditioning_key is not None:
        if cond_key is None:
            cond_key = self.cond_stage_key
        if cond_key != self.first_stage_key:
            # conditioning comes from a different batch entry than the image
            if cond_key in ['caption', 'coordinates_bbox']:
                xc = batch[cond_key]
            elif cond_key == 'class_label':
                xc = batch
            else:
                xc = super().get_input(batch, cond_key).to(self.device)
        else:
            xc = x
        if not self.cond_stage_trainable or force_c_encode:
            # encode now; a trainable cond stage otherwise encodes later in forward()
            if isinstance(xc, dict) or isinstance(xc, list):
                # import pudb; pudb.set_trace()
                c = self.get_learned_conditioning(xc)
            else:
                c = self.get_learned_conditioning(xc.to(self.device))
        else:
            c = xc
        if bs is not None:
            c = c[:bs]

        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            ckey = __conditioning_keys__[self.model.conditioning_key]
            c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

    else:
        # unconditional: only positional encodings (if any) remain as conditioning
        c = None
        xc = None
        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            c = {'pos_x': pos_x, 'pos_y': pos_y}
    out = [z, c]
    if return_first_stage_outputs:
        xrec = self.decode_first_stage(z)
        out.extend([x, xrec])
    if return_original_cond:
        out.append(xc)

    return out
757
+
758
@torch.no_grad()
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
    """Invert the latent scaling/shift and decode through the first stage.

    ``predict_cids`` and ``force_not_quantize`` are accepted for interface
    compatibility with the stock LatentDiffusion decode path but are unused
    by this triplane decoder.
    """
    # invert get_first_stage_encoding: z_latent = scale * (z + shift)
    z = 1. / self.scale_factor * z - self.scale_shift
    return self.first_stage_model.decode(z, unrollout=True)
826
+
827
# same as above but without decorator
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
    """Decode latents with gradients enabled (no @torch.no_grad()).

    Supports codebook-index prediction and optional patch-wise decoding when
    ``split_input_params`` is configured.

    NOTE(review): unlike decode_first_stage, this path does not pass
    ``unrollout=True`` to the first-stage decode — confirm whether that is
    intentional.
    """
    if predict_cids:
        # z holds logits over codebook entries: take argmax code and look it up
        if z.dim() == 4:
            z = torch.argmax(z.exp(), dim=1).long()
        z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
        z = rearrange(z, 'b h w c -> b c h w').contiguous()

    # invert the scaling applied in get_first_stage_encoding
    z = 1. / self.scale_factor * z - self.scale_shift

    if hasattr(self, "split_input_params"):
        if self.split_input_params["patch_distributed_vq"]:
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)
            uf = self.split_input_params["vqf"]
            bs, nc, h, w = z.shape
            # shrink kernel/stride if the latent is smaller than the configured patch
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                print("reducing Kernel")

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                print("reducing stride")

            fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

            z = unfold(z)  # (bn, nc * prod(**ks), L)
            # 1. Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

            # 2. apply model loop over last dim
            if isinstance(self.first_stage_model, VQModelInterface):
                output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                             force_not_quantize=predict_cids or force_not_quantize)
                               for i in range(z.shape[-1])]
            else:

                output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

            o = torch.stack(output_list, axis=-1)  # # (bn, nc, ks[0], ks[1], L)
            o = o * weighting
            # Reverse 1. reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            decoded = fold(o)
            decoded = decoded / normalization  # norm is shape (1, 1, h, w)
            return decoded
        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    else:
        if isinstance(self.first_stage_model, VQModelInterface):
            return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
        else:
            return self.first_stage_model.decode(z)
886
+
887
@torch.no_grad()
def encode_first_stage(self, x):
    """Encode the input through the frozen first stage.

    ``rollout=True`` asks the triplane autoencoder for its rolled-out
    (side-by-side plane) latent layout.
    """
    return self.first_stage_model.encode(x, rollout=True)
926
+
927
def get_norm(self, x):
    """L2 norm along the last dim (zero norms mapped to 1 to avoid div-by-zero).

    Expects x of shape (B, 1, D); returns (B, 1, 1).
    """
    n = torch.linalg.norm(x, dim=-1, keepdim=True)
    n[n == 0] = 1

    # sanity-check the expected (B, 1, 1) layout
    assert n.shape[-1] == 1
    assert n.shape[0] == x.shape[0]
    assert n.shape[1] == x.shape[1]
    assert x.shape[1] == 1

    return n
937
+
938
def random_text_feature_noise(self, c):
    """Augment a text-feature tensor with random noise (currently disabled).

    NOTE(review): with alpha = 1 the noise term vanishes, so nc is simply c
    normalized to unit norm along the last dim — confirm whether the
    alpha = 0.999 blend was meant to stay enabled.
    """
    noise = torch.randn_like(c)
    # alpha = 0.999
    alpha = 1
    nc = alpha * c / self.get_norm(c) + (1 - alpha) * noise / self.get_norm(noise)
    nc = nc / self.get_norm(nc)

    import random
    if random.randint(0, 10) == 0:
        # NOTE(review): `nc[:] = 0` is a dead store — the next line rebinds nc
        # to the original (un-normalized) c, so this 1-in-11 branch returns c
        # unchanged rather than a zeroed feature. Confirm intent.
        nc[:] = 0
        nc = c

    return nc
951
+
952
def shared_step(self, batch, **kwargs):
    """One train/val step: fetch latents + conditioning, then compute the loss."""
    z, cond = self.get_input(batch, self.first_stage_key)
    # print("Random augment text feature...")
    cond = self.random_text_feature_noise(cond)
    return self(z, cond)
958
+
959
def forward(self, x, c=None, return_inter=False, *args, **kwargs):
    """Sample one random timestep per batch item and compute diffusion losses.

    :param x: latent input batch
    :param c: conditioning (required when the model has a conditioning key)
    :param return_inter: also return the intermediate x0 prediction from p_losses
    """
    t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
    if self.model.conditioning_key is not None:
        assert c is not None
        if self.cond_stage_trainable:
            # trainable cond stage: encode conditioning here, inside the grad graph
            c = self.get_learned_conditioning(c)
        if self.shorten_cond_schedule:  # TODO: drop this option
            tc = self.cond_ids[t].to(self.device)
            c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
    return self.p_losses(x, c, t, return_inter=return_inter, *args, **kwargs)
969
+
970
def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
    """Re-express bounding boxes relative to a crop window, clamped to [0, 1]."""
    x_off, y_off, crop_w, crop_h = crop_coordinates[0], crop_coordinates[1], crop_coordinates[2], crop_coordinates[3]

    def rescale_bbox(bbox):
        x0 = clamp((bbox[0] - x_off) / crop_w)
        y0 = clamp((bbox[1] - y_off) / crop_h)
        # width/height are clipped so the box never exceeds the crop
        w = min(bbox[2] / crop_w, 1 - x0)
        h = min(bbox[3] / crop_h, 1 - y0)
        return x0, y0, w, h

    return [rescale_bbox(b) for b in bboxes]
979
+
980
def to3daware(self, triplane):
    """Augment each plane of a rolled-out triplane with pooled context from the other two.

    Input is (B, C, res, 3*res) — three res x res planes side by side.
    Output is (B, 3*C, res, 3*res): each plane keeps its own C channels and
    gains axis-pooled, broadcast copies of the other two planes.
    """
    res = triplane.shape[-2]
    plane1 = triplane[..., 0 * res:1 * res]
    plane2 = triplane[..., 1 * res:2 * res]
    plane3 = triplane[..., 2 * res:3 * res]

    pool_rows = torch.nn.AvgPool2d((res, 1))  # collapse the H axis
    pool_cols = torch.nn.AvgPool2d((1, res))  # collapse the W axis
    row_ctx = lambda p: pool_rows(p).repeat(1, 1, res, 1).permute(0, 1, 3, 2)
    col_ctx = lambda p: pool_cols(p).repeat(1, 1, 1, res).permute(0, 1, 3, 2)

    # each augmented plane = [itself, context from the other two planes]
    aug1 = torch.cat([plane1, row_ctx(plane2), torch.flip(col_ctx(plane3), (3,))], 1)
    aug2 = torch.cat([plane2, col_ctx(plane1), row_ctx(plane3)], 1)
    aug3 = torch.cat([plane3, torch.flip(row_ctx(plane1), (2,)), col_ctx(plane2)], 1)

    return torch.cat([aug1, aug2, aug3], -1).contiguous()
1011
+
1012
def apply_model(self, x_noisy, t, cond, return_ids=False):
    """Run the denoising model on x_noisy at timesteps t with conditioning cond.

    Non-dict conditioning is wrapped into {'c_concat': ...} or
    {'c_crossattn': ...} according to the model's conditioning key. When
    ``split_input_params`` is set, the input is processed patch-wise and the
    patch outputs are stitched back together with blending weights.
    """

    if isinstance(cond, dict):
        # hybrid case, cond is exptected to be a dict
        pass
    else:
        if not isinstance(cond, list):
            cond = [cond]
        key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
        cond = {key: cond}

    if hasattr(self, "split_input_params"):
        assert len(cond) == 1  # todo can only deal with one conditioning atm
        assert not return_ids
        ks = self.split_input_params["ks"]  # eg. (128, 128)
        stride = self.split_input_params["stride"]  # eg. (64, 64)

        h, w = x_noisy.shape[-2:]

        fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

        z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
        # Reshape to img shape
        z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
        z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

        if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                   'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
            # image-like conditioning: crop it into the same patch grid as the input
            c_key = next(iter(cond.keys()))  # get key
            c = next(iter(cond.values()))  # get value
            assert (len(c) == 1)  # todo extend to list with more than one elem
            c = c[0]  # get element

            c = unfold(c)
            c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

            cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

        elif self.cond_stage_key == 'coordinates_bbox':
            assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'

            # assuming padding of unfold is always 0 and its dilation is always 1
            n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
            full_img_h, full_img_w = self.split_input_params['original_image_size']
            # as we are operating on latents, we need the factor from the original image size to the
            # spatial latent size to properly rescale the crops for regenerating the bbox annotations
            num_downs = self.first_stage_model.encoder.num_resolutions - 1
            rescale_latent = 2 ** (num_downs)

            # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
            # need to rescale the tl patch coordinates to be in between (0,1)
            tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                     rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                    for patch_nr in range(z.shape[-1])]

            # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
            patch_limits = [(x_tl, y_tl,
                             rescale_latent * ks[0] / full_img_w,
                             rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
            # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

            # tokenize crop coordinates for the bounding boxes of the respective patches
            patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                  for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
            print(patch_limits_tknzd[0].shape)
            # cut tknzd crop position from conditioning
            assert isinstance(cond, dict), 'cond must be dict to be fed into model'
            cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
            print(cut_cond.shape)

            adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
            adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
            print(adapted_cond.shape)
            adapted_cond = self.get_learned_conditioning(adapted_cond)
            print(adapted_cond.shape)
            adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
            print(adapted_cond.shape)

            cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

        else:
            cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

        # apply model by loop over crops
        output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
        assert not isinstance(output_list[0],
                              tuple)  # todo cant deal with multiple model outputs check this never happens

        o = torch.stack(output_list, axis=-1)
        o = o * weighting
        # Reverse reshape to img shape
        o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
        # stitch crops together
        x_recon = fold(o) / normalization

    else:
        if self.use_3daware:
            # inject cross-plane context channels before the UNet
            x_noisy_3daware = self.to3daware(x_noisy)
            x_recon = self.model(x_noisy_3daware, t, **cond)
        else:
            x_recon = self.model(x_noisy, t, **cond)

    if isinstance(x_recon, tuple) and not return_ids:
        return x_recon[0]
    else:
        return x_recon
1118
+
1119
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    """Recover the implied noise eps from x_t and a predicted x_0."""
    recip = extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
    recipm1 = extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
    return (recip * x_t - pred_xstart) / recipm1
1122
+
1123
def _prior_bpd(self, x_start):
    """
    Get the prior KL term for the variational lower-bound, measured in
    bits-per-dim.
    This term can't be optimized, as it only depends on the encoder.
    :param x_start: the [N x C x ...] tensor of inputs.
    :return: a batch of [N] KL values (in bits), one per batch element.
    """
    n = x_start.shape[0]
    t_last = torch.tensor([self.num_timesteps - 1] * n, device=x_start.device)
    qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t_last)
    kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
    # convert nats to bits
    return mean_flat(kl_prior) / np.log(2.0)
1136
+
1137
def p_losses(self, x_start, cond, t, noise=None, return_inter=False):
    """Compute the diffusion training losses at timesteps t.

    Combines the (optionally logvar-reweighted) simple loss with the VLB
    term; optionally also returns the predicted x_0 reconstruction.
    """
    noise = default(noise, lambda: torch.randn_like(x_start))
    x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
    model_output = self.apply_model(x_noisy, t, cond)

    loss_dict = {}
    prefix = 'train' if self.training else 'val'

    # the regression target depends on the chosen parameterization
    if self.parameterization == "x0":
        target = x_start
    elif self.parameterization == "eps":
        target = noise
    elif self.parameterization == "v":
        target = self.get_v(x_start, noise, t)
    else:
        raise NotImplementedError()

    loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
    loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

    # per-timestep learned (or fixed) log-variance reweighting
    logvar_t = self.logvar[t.to(self.logvar.device)].to(self.device)
    loss = loss_simple / torch.exp(logvar_t) + logvar_t
    # loss = loss_simple / torch.exp(self.logvar) + self.logvar
    if self.learn_logvar:
        loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
        loss_dict.update({'logvar': self.logvar.data.mean()})

    loss = self.l_simple_weight * loss.mean()

    loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
    loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
    loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
    loss += (self.original_elbo_weight * loss_vlb)
    loss_dict.update({f'{prefix}/loss': loss})

    if return_inter:
        return loss, loss_dict, self.predict_start_from_noise(x_noisy, t=t, noise=model_output)
    else:
        return loss, loss_dict
1176
+
1177
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                    return_x0=False, score_corrector=None, corrector_kwargs=None):
    """Posterior mean/variance of p(x_{t-1} | x_t) under the model.

    Converts the model output into an x_0 prediction (per the
    parameterization), optionally clips/quantizes it, then applies the
    closed-form q-posterior.
    """
    t_in = t
    model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

    if score_corrector is not None:
        assert self.parameterization == "eps"
        model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

    if return_codebook_ids:
        model_out, logits = model_out

    # recover the x_0 estimate from the raw model output
    if self.parameterization == "eps":
        x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
    elif self.parameterization == "x0":
        x_recon = model_out
    elif self.parameterization == "v":
        x_recon = self.predict_start_from_z_and_v(x, t, model_out)
    else:
        raise NotImplementedError()

    if clip_denoised:
        x_recon.clamp_(-1., 1.)
    if quantize_denoised:
        # snap the estimate onto the first stage's codebook
        x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
    model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
    if return_codebook_ids:
        return model_mean, posterior_variance, posterior_log_variance, logits
    elif return_x0:
        return model_mean, posterior_variance, posterior_log_variance, x_recon
    else:
        return model_mean, posterior_variance, posterior_log_variance
1209
+
1210
@torch.no_grad()
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
             return_codebook_ids=False, quantize_denoised=False, return_x0=False,
             temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
    """Draw one reverse-diffusion sample x_{t-1} from x_t.

    Optionally returns the intermediate x_0 estimate; noise is suppressed at
    t == 0 and can be scaled (temperature) or dropped out (noise_dropout).
    """
    b, *_, device = *x.shape, x.device
    outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                   return_codebook_ids=return_codebook_ids,
                                   quantize_denoised=quantize_denoised,
                                   return_x0=return_x0,
                                   score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
    if return_codebook_ids:
        raise DeprecationWarning("Support dropped.")
        model_mean, _, model_log_variance, logits = outputs
    elif return_x0:
        model_mean, _, model_log_variance, x0 = outputs
    else:
        model_mean, _, model_log_variance = outputs

    noise = noise_like(x.shape, device, repeat_noise) * temperature
    if noise_dropout > 0.:
        noise = torch.nn.functional.dropout(noise, p=noise_dropout)
    # no noise when t == 0
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

    if return_codebook_ids:
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
    if return_x0:
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
    else:
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1240
+
1241
@torch.no_grad()
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                          img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                          score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                          log_every_t=None):
    """Full reverse-diffusion loop that also collects intermediate x_0 estimates.

    :return: (final_img, intermediates) where intermediates are x_0 partials
        logged every ``log_every_t`` steps.
    """
    if not log_every_t:
        log_every_t = self.log_every_t
    timesteps = self.num_timesteps
    if batch_size is not None:
        b = batch_size if batch_size is not None else shape[0]
        shape = [batch_size] + list(shape)
    else:
        b = batch_size = shape[0]
    if x_T is None:
        img = torch.randn(shape, device=self.device)
    else:
        img = x_T
    intermediates = []
    if cond is not None:
        # truncate conditioning to the effective batch size
        if isinstance(cond, dict):
            cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
            list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
        else:
            cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

    if start_T is not None:
        timesteps = min(timesteps, start_T)
    iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                    total=timesteps) if verbose else reversed(
        range(0, timesteps))
    if type(temperature) == float:
        # broadcast a scalar temperature over all timesteps
        temperature = [temperature] * timesteps

    for i in iterator:
        ts = torch.full((b,), i, device=self.device, dtype=torch.long)
        if self.shorten_cond_schedule:
            assert self.model.conditioning_key != 'hybrid'
            tc = self.cond_ids[ts].to(cond.device)
            cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

        img, x0_partial = self.p_sample(img, cond, ts,
                                        clip_denoised=self.clip_denoised,
                                        quantize_denoised=quantize_denoised, return_x0=True,
                                        temperature=temperature[i], noise_dropout=noise_dropout,
                                        score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if mask is not None:
            # inpainting: keep masked region pinned to the (noised) known image
            assert x0 is not None
            img_orig = self.q_sample(x0, ts)
            img = img_orig * mask + (1. - mask) * img

        if i % log_every_t == 0 or i == timesteps - 1:
            intermediates.append(x0_partial)
        if callback: callback(i)
        if img_callback: img_callback(img, i)
    return img, intermediates
1296
+
1297
@torch.no_grad()
def p_sample_loop(self, cond, shape, return_intermediates=False,
                  x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                  mask=None, x0=None, img_callback=None, start_T=None,
                  log_every_t=None):
    """Standard ancestral sampling loop from pure noise (or x_T) down to x_0.

    :return: final sample, plus logged intermediates when requested.
    """

    if not log_every_t:
        log_every_t = self.log_every_t
    device = self.betas.device
    b = shape[0]
    if x_T is None:
        img = torch.randn(shape, device=device)
    else:
        img = x_T

    intermediates = [img]
    if timesteps is None:
        timesteps = self.num_timesteps

    if start_T is not None:
        timesteps = min(timesteps, start_T)
    iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
        range(0, timesteps))

    if mask is not None:
        assert x0 is not None
        assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

    for i in iterator:
        ts = torch.full((b,), i, device=device, dtype=torch.long)
        if self.shorten_cond_schedule:
            assert self.model.conditioning_key != 'hybrid'
            tc = self.cond_ids[ts].to(cond.device)
            cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

        # if self.is_test and i % 50 == 0:
        #     decode_res = self.decode_first_stage(img)
        #     rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
        #         decode_res, self.batch_rays, self.batch_img,
        #     )
        #     rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
        #     imageio.imwrite(os.path.join(self.logger.log_dir, "sample_process_{}.png".format(i)), rgb_sample)
        #     colorize_res = self.first_stage_model.to_rgb(img)
        #     imageio.imwrite(os.path.join(self.logger.log_dir, "sample_process_latent_{}.png".format(i)), colorize_res[0])

        img = self.p_sample(img, cond, ts,
                            clip_denoised=self.clip_denoised,
                            quantize_denoised=quantize_denoised)
        if mask is not None:
            # inpainting: keep masked region pinned to the (noised) known image
            img_orig = self.q_sample(x0, ts)
            img = img_orig * mask + (1. - mask) * img

        if i % log_every_t == 0 or i == timesteps - 1:
            intermediates.append(img)
        if callback: callback(i)
        if img_callback: img_callback(img, i)

    if return_intermediates:
        return img, intermediates
    return img
1357
+
1358
+ @torch.no_grad()
1359
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1360
+ verbose=True, timesteps=None, quantize_denoised=False,
1361
+ mask=None, x0=None, shape=None,**kwargs):
1362
+ if shape is None:
1363
+ shape = (batch_size, self.channels, self.image_size, self.image_size * 3)
1364
+ if cond is not None:
1365
+ if isinstance(cond, dict):
1366
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1367
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1368
+ else:
1369
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1370
+ return self.p_sample_loop(cond,
1371
+ shape,
1372
+ return_intermediates=return_intermediates, x_T=x_T,
1373
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1374
+ mask=mask, x0=x0)
1375
+
1376
    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        # Sample either with the fast DDIM sampler (`ddim=True`) or with the
        # full DDPM chain via `self.sample`. Both branches return
        # (samples, intermediates).
        if ddim:
            ddim_sampler = DDIMSampler(self)
            # NOTE(review): this shape is square (image_size x image_size),
            # but `sample()` defaults to a triplane-wide latent of width
            # image_size * 3 — confirm the DDIM path is given the intended
            # latent shape.
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates
1390
+
1391
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation losses (raw and EMA-weighted) and, for the first
        two batches, render a qualitative side-by-side of a fresh sample,
        the ground-truth input and the model's intermediate reconstruction
        to the experiment logger (wandb)."""
        # x, c = self.get_input(batch, self.first_stage_key)
        # self.batch_rays = batch['batch_rays'][0][1:2]
        # self.batch_img = batch['img'][0][1:2]
        # self.is_test = True
        # self.test_schedule(x[0:1])
        # exit(0)

        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            # _, loss_dict_ema = self.shared_step(batch)
            x, c = self.get_input(batch, self.first_stage_key)
            # return_inter=True also yields the model's intermediate
            # reconstruction so it can be visualized below.
            _, loss_dict_ema, inter_res = self(x, c, return_inter=True)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True, sync_dist=True)

        # Qualitative visualization for the first two validation batches only.
        if batch_idx < 2:
            if self.num_timesteps < 1000:
                # Shortened schedules do not end at pure noise, so start the
                # reverse chain from x noised to the final step instead of N(0, I).
                x_T = self.q_sample(x_start=x[0:1], t=torch.full((1,), self.num_timesteps-1, device=x.device, dtype=torch.long), noise=torch.randn_like(x[0:1]))
                print("Specifying x_T when sampling!")
            else:
                x_T = None
            with self.ema_scope():
                res = self.sample(c, 1, shape=x[0:1].shape, x_T = x_T)
                decode_res = self.decode_first_stage(res)
                decode_input = self.decode_first_stage(x[:1])
                decode_output = self.decode_first_stage(inter_res[:1])

            # Colorized views of the raw latents (random 1x1-conv projection).
            colorize_res = self.first_stage_model.to_rgb(res)[0]
            colorize_x = self.first_stage_model.to_rgb(x[:1])[0]
            # imageio.imwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.png".format(batch_idx, 0)), colorize_res[0])
            # imageio.imwrite(os.path.join(self.logger.log_dir, "gt_{}_{}.png".format(batch_idx, 0)), colorize_x[0])

            # Render the decoded triplanes with the batch's camera rays.
            rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_res, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_input, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_input, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_output, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_output, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_sample = to8b(rgb_sample.detach().cpu().numpy())
            rgb_input = to8b(rgb_input.detach().cpu().numpy())
            rgb_output = to8b(rgb_output.detach().cpu().numpy())

            # Pick view 1 when available (view 0 otherwise) and tile
            # sample | input | reconstruction horizontally.
            if rgb_sample.shape[0] == 1:
                rgb_all = np.concatenate([rgb_sample[0], rgb_input[0], rgb_output[0]], 1)
            else:
                rgb_all = np.concatenate([rgb_sample[1], rgb_input[1], rgb_output[1]], 1)

            # BGR -> RGB channel swap for logging.
            rgb_all = np.stack([rgb_all[..., 2], rgb_all[..., 1], rgb_all[..., 0]], -1)

            if self.model.conditioning_key is not None:
                if self.cond_stage_key == 'img_cond':
                    # Image conditioning: append the conditioning image itself.
                    cond_img = super().get_input(batch, self.cond_stage_key)[0].permute(1, 2, 0)
                    rgb_all = np.concatenate([rgb_all, to8b(cond_img.cpu().numpy())], 1)
                else:
                    # Text conditioning: draw the caption onto the image.
                    import cv2
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    # org
                    org = (50, 50)
                    # fontScale
                    fontScale = 1
                    # Blue color in BGR
                    color = (255, 0, 0)
                    # Line thickness of 2 px
                    thickness = 2
                    caption = super().get_input(batch, self.cond_stage_key)[0]
                    # Wrap the caption to 30-character lines before drawing.
                    break_caption = []
                    for i in range(len(caption) // 30 + 1):
                        break_caption_i = caption[i*30:(i+1)*30]
                        break_caption.append(break_caption_i)
                    for i, bci in enumerate(break_caption):
                        cv2.putText(rgb_all, bci, (50, 50*(i+1)), font, fontScale, color, thickness, cv2.LINE_AA)

            self.logger.experiment.log({
                "val/vis": [wandb.Image(rgb_all)],
                "val/colorize_rse": [wandb.Image(colorize_res)],
                "val/colorize_x": [wandb.Image(colorize_x)],
            })
1474
+
1475
    @torch.no_grad()
    def test_schedule(self, x_start, freq=50):
        """Visualize the forward (noising) schedule.

        Every `freq` timesteps, noise `x_start` to step t, decode and render
        it, then save one horizontal strip of the renders and one of the
        colorized latents to the logger directory. Requires the caller to
        have set `self.batch_rays` / `self.batch_img` beforehand.
        """
        noise = torch.randn_like(x_start)
        img_list = []
        latent_list = []
        for t in tqdm(range(self.num_timesteps)):
            if t % freq == 0:
                t_long = torch.Tensor([t,]).long().to(x_start.device)
                x_noisy = self.q_sample(x_start=x_start, t=t_long, noise=noise)
                decode_res = self.decode_first_stage(x_noisy)
                # Render the noised triplane from the pre-selected view.
                rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                    decode_res, self.batch_rays, self.batch_img,
                )
                rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
                # imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_{}.png".format(t)), rgb_sample)
                colorize_res = self.first_stage_model.to_rgb(x_noisy)
                # imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_latent_{}.png".format(t)), colorize_res[0])
                img_list.append(rgb_sample)
                latent_list.append(colorize_res[0])
        # Filenames encode the schedule hyperparameters for comparison runs.
        imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_{}_{}_{}_{}.png".format(self.linear_start, self.linear_end, self.beta_schedule, self.scale_factor)), np.concatenate(img_list, 1))
        imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_latent_{}_{}_{}_{}.png".format(self.linear_start, self.linear_end, self.beta_schedule, self.scale_factor)), np.concatenate(latent_list, 1))
1496
+
1497
    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        """Test-time entry point with three modes selected by `self.test_mode`:

        - 'fid': sample a full batch and dump every rendered view as PNGs
          for later FID computation.
        - 'sample': sample once, save colorized latents / conditioning and a
          turntable MP4 rendered over all camera views.
        - 'noise_schedule': visualize the forward schedule via
          `test_schedule` and terminate the process.
        """
        x, c = self.get_input(batch, self.first_stage_key)
        if self.test_mode == 'fid':
            # FID needs many samples, so use the whole batch.
            bs = x.shape[0]
        else:
            bs = 1
        if self.test_mode == 'noise_schedule':
            # Fixed view (index 33) used for visualizing the noising chain.
            self.batch_rays = batch['batch_rays'][0][33:34]
            self.batch_img = batch['img'][0][33:34]
            self.is_test = True
            self.test_schedule(x)
            exit(0)
        with self.ema_scope():
            if c is not None:
                res = self.sample(c[:bs], bs, shape=x[0:bs].shape)
            else:
                res = self.sample(None, bs, shape=x[0:bs].shape)
            decode_res = self.decode_first_stage(res)
        if self.test_mode == 'fid':
            folder = os.path.join(self.logger.log_dir, 'FID_' + self.test_tag)
            if not os.path.exists(folder):
                os.makedirs(folder, exist_ok=True)
            rgb_sample_list = []
            for b in range(bs):
                # Render each sample with its own rays/reference images.
                rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                    decode_res[b:b+1], batch['batch_rays'][b], batch['img'][b],
                )
                rgb_sample = to8b(rgb_sample.detach().cpu().numpy())
                rgb_sample_list.append(rgb_sample)
            # One PNG per sample per view.
            for i in range(len(rgb_sample_list)):
                for v in range(rgb_sample_list[i].shape[0]):
                    imageio.imwrite(os.path.join(folder, "sample_{}_{}_{}.png".format(batch_idx, i, v)), rgb_sample_list[i][v])
        elif self.test_mode == 'sample':
            colorize_res = self.first_stage_model.to_rgb(res)
            colorize_x = self.first_stage_model.to_rgb(x[:1])
            imageio.imwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.png".format(batch_idx, 0)), colorize_res[0])
            imageio.imwrite(os.path.join(self.logger.log_dir, "gt_{}_{}.png".format(batch_idx, 0)), colorize_x[0])
            if self.model.conditioning_key is not None:
                # Also save the raw conditioning image for reference.
                cond_img = super().get_input(batch, self.cond_stage_key)[0].permute(1, 2, 0)
                cond_img = to8b(cond_img.cpu().numpy())
                imageio.imwrite(os.path.join(self.logger.log_dir, "cond_{}_{}.png".format(batch_idx, 0)), cond_img)
            for b in range(bs):
                # Render every camera view in sequence to build a turntable video.
                video = []
                for v in tqdm(range(batch['batch_rays'].shape[1])):
                    rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                        decode_res[b:b+1], batch['batch_rays'][0][v:v+1], batch['img'][0][v:v+1],
                    )
                    rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
                    video.append(rgb_sample)
                imageio.mimwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.mp4".format(batch_idx, b)), video, fps=24)
                print("Saving to {}".format(os.path.join(self.logger.log_dir, "sample_{}_{}.mp4".format(batch_idx, b))))
1549
+
1550
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):
        """Assemble a dict of visualization tensors for the image logger.

        Depending on the flags this includes: inputs and first-stage
        reconstructions, the conditioning, a forward-diffusion row, samples
        (optionally with quantized x0), inpainting/outpainting results and a
        progressive-denoising row. `return_keys` optionally filters the
        returned dict down to the requested entries.
        """
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            # Visualize the conditioning in whatever form it takes.
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row: decode z noised to a grid of timesteps.
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row: draw samples under EMA weights.
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with self.ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

            if inpaint:
                # make a simple center square
                b, h, w = z.shape[0], z.shape[2], z.shape[3]
                mask = torch.ones(N, h, w).to(self.device)
                # zeros will be filled in
                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
                mask = mask[:, None, ...]
                with self.ema_scope("Plotting Inpaint"):

                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_inpainting"] = x_samples
                log["mask"] = mask

                # outpaint
                # NOTE(review): this reuses the inpaint mask unchanged; the
                # upstream implementation inverts it (mask = 1. - mask) before
                # outpainting — confirm whether the inversion was dropped
                # intentionally.
                with self.ema_scope("Plotting Outpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with self.ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
1660
+
1661
+ def configure_optimizers(self):
1662
+ lr = self.learning_rate
1663
+ params = list(self.model.parameters())
1664
+ if self.cond_stage_trainable:
1665
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1666
+ params = params + list(self.cond_stage_model.parameters())
1667
+ if self.learn_logvar:
1668
+ print('Diffusion model optimizing logvar')
1669
+ params.append(self.logvar)
1670
+ opt = torch.optim.AdamW(params, lr=lr)
1671
+ if self.use_scheduler:
1672
+ assert 'target' in self.scheduler_config
1673
+ scheduler = instantiate_from_config(self.scheduler_config)
1674
+
1675
+ print("Setting up LambdaLR scheduler...")
1676
+ scheduler = [
1677
+ {
1678
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1679
+ 'interval': 'step',
1680
+ 'frequency': 1
1681
+ }]
1682
+ return [opt], scheduler
1683
+ return opt
1684
+
1685
+ @torch.no_grad()
1686
+ def to_rgb(self, x):
1687
+ x = x.float()
1688
+ if not hasattr(self, "colorize"):
1689
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1690
+ x = nn.functional.conv2d(x, weight=self.colorize)
1691
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1692
+ return x
1693
+
1694
+
1695
+
1696
class DiffusionWrapper(pl.LightningModule):
    """Thin wrapper that routes conditioning into the underlying UNet.

    `conditioning_key` selects how `c_concat` / `c_crossattn` are fed to
    the diffusion model: channel concatenation ('concat'), cross-attention
    context ('crossattn'), both ('hybrid'), or class conditioning via the
    `y` argument ('adm'). `None` means unconditional.
    """

    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        key = self.conditioning_key
        if key is None:
            return self.diffusion_model(x, t)
        if key == 'concat':
            return self.diffusion_model(torch.cat([x] + c_concat, dim=1), t)
        if key == 'crossattn':
            context = torch.cat(c_crossattn, 1)
            return self.diffusion_model(x, t, context=context)
        if key == 'hybrid':
            stacked = torch.cat([x] + c_concat, dim=1)
            context = torch.cat(c_crossattn, 1)
            return self.diffusion_model(stacked, t, context=context)
        if key == 'adm':
            return self.diffusion_model(x, t, y=c_crossattn[0])
        raise NotImplementedError()
1723
+
1724
+
1725
class Layout2ImgDiffusion(LatentDiffusion):
    """Latent diffusion specialized for bounding-box layout conditioning."""
    # TODO: move all layout-specific hacks to this class

    def __init__(self, cond_stage_key, *args, **kwargs):
        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        """Extend parent logging with a rendered bounding-box image."""
        logs = super().log_images(batch=batch, N=N, *args, **kwargs)

        split = 'train' if self.training else 'validation'
        dataset = self.trainer.datamodule.datasets[split]
        builder = dataset.conditional_builders[self.cond_stage_key]

        def category_name(catno):
            # Resolve a category number to its human-readable label.
            return dataset.get_textual_label(dataset.get_category_id(catno))

        rendered = [
            builder.plot(tokenized.detach().cpu(), category_name, (256, 256))
            for tokenized in batch[self.cond_stage_key][:N]
        ]
        logs['bbox_image'] = torch.stack(rendered, dim=0)
        return logs
3DTopia/ldm/models/diffusion/ddpm_preprocess.py ADDED
@@ -0,0 +1,1716 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ wild mixture of
3
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ import os
10
+ import wandb
11
+ import torch
12
+ import imageio
13
+ import torch.nn as nn
14
+ import numpy as np
15
+ import pytorch_lightning as pl
16
+ from torch.optim.lr_scheduler import LambdaLR
17
+ from einops import rearrange, repeat
18
+ from contextlib import contextmanager
19
+ from functools import partial
20
+ from tqdm import tqdm
21
+ from torchvision.utils import make_grid
22
+ from pytorch_lightning.utilities.rank_zero import rank_zero_only
23
+
24
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
25
+ from ldm.modules.ema import LitEma
26
+ from module.model_2d import DiagonalGaussianDistribution
27
+ from ldm.modules.distributions.distributions import normal_kl
28
+ from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
29
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
30
+ from ldm.models.diffusion.ddim import DDIMSampler
31
+ from utility.triplane_renderer.renderer import to8b
32
+
33
+ import ipdb
34
+ __conditioning_keys__ = {'concat': 'c_concat',
35
+ 'crossattn': 'c_crossattn',
36
+ 'adm': 'y'}
37
+
38
+
39
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that ignores mode switches.

    Bind this in place of ``model.train`` so a frozen module can never be
    toggled between train/eval mode; the ``mode`` argument is accepted for
    signature compatibility but unused.
    """
    return self
43
+
44
+
45
def uniform_on_device(r1, r2, shape, device):
    """Sample a tensor of ``shape`` on ``device``, uniform between ``r2``
    and ``r1`` (i.e. ``r2 + (r1 - r2) * U[0, 1)``)."""
    span = r1 - r2
    return span * torch.rand(*shape, device=device) + r2
47
+
48
+
49
+ class DDPM(pl.LightningModule):
50
+ # classic DDPM with Gaussian diffusion, in image space
51
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=[],
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 learning_rate=1e-4,
                 shift_scale=None,
                 ):
        """Classic DDPM (Gaussian diffusion in image space).

        :param unet_config: config instantiated into the denoising UNet
            (wrapped in DiffusionWrapper together with `conditioning_key`).
        :param timesteps: number of diffusion steps in the schedule.
        :param beta_schedule: name of the beta schedule ('linear', ...).
        :param ckpt_path: optional checkpoint to restore from at init.
        :param ignore_keys: state-dict key prefixes dropped when loading.
            (NOTE(review): mutable default argument — harmless here since it
            is only read, but a tuple would be safer.)
        :param parameterization: model prediction target — noise ('eps'),
            clean data ('x0') or velocity ('v').
        """
        super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.beta_schedule = beta_schedule
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            # Exponential-moving-average copy of the model weights, used via
            # ema_scope() for evaluation/sampling.
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)

        # Precompute all schedule-dependent buffers (betas, alphas_cumprod, ...).
        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, shift_scale=shift_scale)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        # Per-timestep log-variance of the loss weighting; becomes a learned
        # parameter when learn_logvar is set.
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

        self.learning_rate = learning_rate
125
+
126
+
127
    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, shift_scale=None):
        """Precompute and register all diffusion-schedule buffers.

        Builds betas (either `given_betas` or via `make_beta_schedule`) and
        derives the cumulative-product alpha terms, the forward q(x_t | x_0)
        coefficients, the posterior q(x_{t-1} | x_t, x_0) coefficients and
        the per-timestep VLB loss weights for the chosen parameterization.
        All quantities are registered as (non-trainable) module buffers.
        """
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s, shift_scale=shift_scale)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # alpha-bar_{t-1}, with alpha-bar_{-1} defined as 1.
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        print("Using timesteps of {}".format(self.num_timesteps))
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        # print("sqrt_alphas_cumprod", np.sqrt(alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        # v_posterior interpolates between beta_tilde (v=0) and beta (v=1).
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        # Per-timestep weights for the variational-bound loss term.
        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            # NOTE(review): this branch applies np.sqrt to a torch.Tensor;
            # it works via __array__ coercion but mixing numpy and torch here
            # looks unintentional — verify against the upstream formula.
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        elif self.parameterization == "v":
            lvlb_weights = torch.ones_like(self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        # lvlb_weights[0] is inf/NaN for eps-parameterization; patch it with
        # the value at t=1.
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        # NOTE(review): `.all()` only fails when EVERY weight is NaN; if the
        # intent is "no NaNs anywhere", this should be `.any()` — same wording
        # exists upstream, so confirm before changing.
        assert not torch.isnan(self.lvlb_weights).all()
185
+
186
+ @contextmanager
187
+ def ema_scope(self, context=None):
188
+ if self.use_ema:
189
+ self.model_ema.store(self.model.parameters())
190
+ self.model_ema.copy_to(self.model)
191
+ if context is not None:
192
+ print(f"{context}: Switched to EMA weights")
193
+ try:
194
+ yield None
195
+ finally:
196
+ if self.use_ema:
197
+ self.model_ema.restore(self.model.parameters())
198
+ if context is not None:
199
+ print(f"{context}: Restored training weights")
200
+
201
    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        """Load weights from a checkpoint file.

        :param path: checkpoint path, handed to ``torch.load``.
        :param ignore_keys: state-dict key prefixes to delete before loading.
        :param only_model: if True, load into ``self.model`` only rather than
            the whole LightningModule.
        """
        sd = torch.load(path, map_location="cpu")
        # Lightning checkpoints nest the weights under "state_dict".
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        # strict=False so architecture differences surface as missing/
        # unexpected keys (reported below) instead of hard failures.
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")
218
+
219
def q_mean_variance(self, x_start, t):
    """
    Get the distribution q(x_t | x_0).
    :param x_start: the [N x C x ...] tensor of noiseless inputs.
    :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
    :return: A tuple (mean, variance, log_variance), all of x_start's shape.
    """
    # mean = sqrt(alpha_cumprod_t) * x_0 ; variance = 1 - alpha_cumprod_t
    mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
    variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
    log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
    return mean, variance, log_variance
230
+
231
def predict_start_from_noise(self, x_t, t, noise):
    """Invert the forward process for the eps-parameterization:
    x_0 = 1/sqrt(alpha_cumprod_t) * x_t - sqrt(1/alpha_cumprod_t - 1) * eps."""
    return (
        extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
        extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
    )
236
+
237
def predict_start_from_z_and_v(self, x_t, t, v):
    """Recover x_0 from a noisy sample and a v-parameterized prediction:
    x_0 = sqrt(alpha_cumprod_t) * x_t - sqrt(1 - alpha_cumprod_t) * v."""
    return (
        extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
        extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
    )
244
+
245
def predict_eps_from_z_and_v(self, x_t, t, v):
    """Recover the noise eps from a noisy sample and a v-prediction:
    eps = sqrt(alpha_cumprod_t) * v + sqrt(1 - alpha_cumprod_t) * x_t."""
    return (
        extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
        extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
    )
250
+
251
def q_posterior(self, x_start, x_t, t):
    """Compute the true posterior q(x_{t-1} | x_t, x_0): mean, variance and
    clipped log-variance, using the precomputed schedule buffers."""
    posterior_mean = (
        extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
        extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
    )
    posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
    # log-variance is clipped at t=0 where the raw variance is 0
    posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
    return posterior_mean, posterior_variance, posterior_log_variance_clipped
259
+
260
def p_mean_variance(self, x, t, clip_denoised: bool):
    """One reverse step: run the model, convert its output to an x_0 estimate
    according to the parameterization, and return the posterior
    (mean, variance, log_variance) of q(x_{t-1} | x_t, x_0_hat).

    :param x: noisy sample x_t.
    :param t: timestep tensor, one entry per batch element.
    :param clip_denoised: clamp the x_0 estimate into [-1, 1] in place.
    """
    model_out = self.model(x, t)
    if self.parameterization == "eps":
        x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
    elif self.parameterization == "x0":
        x_recon = model_out
    elif self.parameterization == "v":
        x_recon = self.predict_start_from_z_and_v(x, t, model_out)
    else:
        # Fix: previously an unknown parameterization fell through and crashed
        # with an opaque NameError on x_recon; fail loudly instead (matches
        # the error handling in p_losses).
        raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
    if clip_denoised:
        x_recon.clamp_(-1., 1.)

    model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
    return model_mean, posterior_variance, posterior_log_variance
273
+
274
@torch.no_grad()
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
    """Sample x_{t-1} ~ p(x_{t-1} | x_t) for one reverse diffusion step."""
    b, *_, device = *x.shape, x.device
    model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
    noise = noise_like(x.shape, device, repeat_noise)
    # no noise when t == 0
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
282
+
283
@torch.no_grad()
def p_sample_loop(self, shape, return_intermediates=False):
    """Run the full reverse chain from pure noise down to t=0.

    :param shape: output tensor shape (batch, channels, H, W).
    :param return_intermediates: also return snapshots taken every
        ``log_every_t`` steps (plus the final step).
    """
    device = self.betas.device
    b = shape[0]
    img = torch.randn(shape, device=device)
    intermediates = [img]
    for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
        img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                            clip_denoised=self.clip_denoised)
        if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
            intermediates.append(img)
    if return_intermediates:
        return img, intermediates
    return img
297
+
298
@torch.no_grad()
def sample(self, batch_size=16, return_intermediates=False):
    """Draw ``batch_size`` samples by running the full reverse chain at the
    model's configured resolution and channel count."""
    target_shape = (batch_size, self.channels, self.image_size, self.image_size)
    return self.p_sample_loop(target_shape, return_intermediates=return_intermediates)
304
+
305
def q_sample(self, x_start, t, noise=None):
    """Diffuse x_0 forward to timestep t in closed form:
    x_t = sqrt(alpha_cumprod_t) * x_0 + sqrt(1 - alpha_cumprod_t) * eps."""
    noise = default(noise, lambda: torch.randn_like(x_start))
    return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
309
+
310
def get_v(self, x, noise, t):
    """v-parameterization target:
    v = sqrt(alpha_cumprod_t) * eps - sqrt(1 - alpha_cumprod_t) * x_0."""
    return (
        extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
        extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
    )
315
+
316
def get_loss(self, pred, target, mean=True):
    """Elementwise (or mean-reduced) reconstruction loss between prediction
    and target, selected by ``self.loss_type`` ('l1' or 'l2').

    :param pred: model prediction tensor.
    :param target: ground-truth tensor of the same shape.
    :param mean: reduce to a scalar when True, else keep per-element losses.
    :raises NotImplementedError: for an unrecognized ``self.loss_type``.
    """
    if self.loss_type == 'l1':
        loss = (target - pred).abs()
        if mean:
            loss = loss.mean()
    elif self.loss_type == 'l2':
        if mean:
            loss = torch.nn.functional.mse_loss(target, pred)
        else:
            loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
    else:
        # Fix: the original used a plain string literal, so the {loss_type}
        # placeholder was never interpolated into the error message.
        raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

    return loss
330
+
331
def p_losses(self, x_start, t, noise=None):
    """Training objective: noise x_0 to x_t, predict the parameterization
    target, and combine the simple loss with the VLB-weighted loss.

    :return: (total loss, dict of logged loss components).
    """
    noise = default(noise, lambda: torch.randn_like(x_start))
    x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
    model_out = self.model(x_noisy, t)

    loss_dict = {}
    # target depends on the chosen parameterization
    if self.parameterization == "eps":
        target = noise
    elif self.parameterization == "x0":
        target = x_start
    elif self.parameterization == "v":
        target = self.get_v(x_start, noise, t)
    else:
        raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")

    # mean over spatial/channel dims -> one loss value per batch element
    loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

    log_prefix = 'train' if self.training else 'val'

    loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
    loss_simple = loss.mean() * self.l_simple_weight

    # per-timestep VLB weighting (see lvlb_weights buffer in register_schedule)
    loss_vlb = (self.lvlb_weights[t] * loss).mean()
    loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

    loss = loss_simple + self.original_elbo_weight * loss_vlb

    loss_dict.update({f'{log_prefix}/loss': loss})

    return loss, loss_dict
361
+
362
def forward(self, x, *args, **kwargs):
    """Draw one uniform random timestep per batch element and evaluate the
    diffusion training loss on it."""
    batch = x.shape[0]
    timesteps = torch.randint(0, self.num_timesteps, (batch,), device=self.device).long()
    return self.p_losses(x, timesteps, *args, **kwargs)
367
+
368
def get_input(self, batch, k):
    """Fetch entry ``k`` from the batch dict: lists pass through unchanged,
    tensors are returned as contiguous float tensors."""
    entry = batch[k]
    if isinstance(entry, list):
        return entry
    return entry.to(memory_format=torch.contiguous_format).float()
377
+
378
def shared_step(self, batch):
    """Common train/val step: fetch the input tensor and compute the
    diffusion loss via ``forward``. Returns (loss, loss_dict)."""
    inputs = self.get_input(batch, self.first_stage_key)
    return self(inputs)
382
+
383
def training_step(self, batch, batch_idx):
    """Lightning training step: compute the diffusion loss and log metrics."""
    loss, loss_dict = self.shared_step(batch)

    self.log_dict(loss_dict, prog_bar=False,
                  logger=True, on_step=True, on_epoch=True)

    self.log("global_step", self.global_step,
             prog_bar=False, logger=True, on_step=True, on_epoch=False)

    if self.use_scheduler:
        # log the actual LR applied by the (external) scheduler
        lr = self.optimizers().param_groups[0]['lr']
        self.log('lr_abs', lr, prog_bar=False, logger=True, on_step=True, on_epoch=False)

    return loss
397
+
398
@torch.no_grad()
def validation_step(self, batch, batch_idx):
    """Validation step: log losses both with the raw training weights and
    with the EMA weights (suffix '_ema')."""
    _, loss_dict_no_ema = self.shared_step(batch)
    with self.ema_scope():
        _, loss_dict_ema = self.shared_step(batch)
        loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
    self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
    self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
406
+
407
def on_train_batch_end(self, *args, **kwargs):
    """Lightning hook: advance the EMA shadow weights after every batch."""
    if not self.use_ema:
        return
    self.model_ema(self.model)
410
+
411
def _get_rows_from_list(self, samples):
    """Turn a list of n [b, c, h, w] tensors into one image grid with n
    images per row (row = one batch element across the list)."""
    per_row = len(samples)
    flattened = rearrange(samples, 'n b c h w -> (b n) c h w')
    return make_grid(flattened, nrow=per_row)
417
+
418
@torch.no_grad()
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
    """Build a dict of visualization tensors: inputs, a forward-diffusion
    row, and (optionally) EMA samples with their denoising trajectory.

    :param N: max number of batch elements to visualize.
    :param n_row: elements per row in the diffusion grid.
    :param return_keys: if given, restrict the returned dict to these keys.
    """
    log = dict()
    x = self.get_input(batch, self.first_stage_key)
    N = min(x.shape[0], N)
    n_row = min(x.shape[0], n_row)
    x = x.to(self.device)[:N]
    log["inputs"] = x

    # get diffusion row: x_start noised at log_every_t intervals
    diffusion_row = list()
    x_start = x[:n_row]

    for t in range(self.num_timesteps):
        if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
            # note: t is rebound from int to a per-row tensor inside the loop
            t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
            t = t.to(self.device).long()
            noise = torch.randn_like(x_start)
            x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
            diffusion_row.append(x_noisy)

    log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

    if sample:
        # get denoise row, sampled under EMA weights
        with self.ema_scope("Plotting"):
            samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

        log["samples"] = samples
        log["denoise_row"] = self._get_rows_from_list(denoise_row)

    if return_keys:
        # fall back to the full log when none of the requested keys exist
        if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
            return log
        else:
            return {key: log[key] for key in return_keys}
    return log
455
+
456
def configure_optimizers(self):
    """Build the AdamW optimizer over the diffusion model's parameters,
    including the learned log-variance when enabled."""
    trainable = list(self.model.parameters())
    if self.learn_logvar:
        trainable.append(self.logvar)
    return torch.optim.AdamW(trainable, lr=self.learning_rate)
463
+
464
+
465
+ class LatentDiffusion(DDPM):
466
+ """main class"""
467
def __init__(self,
             first_stage_config,
             cond_stage_config,
             num_timesteps_cond=None,
             cond_stage_key="image",
             cond_stage_trainable=False,
             concat_mode=True,
             cond_stage_forward=None,
             conditioning_key=None,
             scale_factor=1.0,
             scale_shift=0.0,
             scale_by_std=False,
             use_3daware=False,
             *args, **kwargs):
    """Latent diffusion model operating on first-stage latents.

    :param first_stage_config: config for the frozen autoencoder.
    :param cond_stage_config: config for the conditioning encoder, or the
        sentinel '__is_unconditional__'.
    :param scale_factor: latent normalization scale (z' = s * (z + shift)).
    :param scale_by_std: derive the scale from latent std on the first batch.
    :param use_3daware: enable 3D-aware triplane feature mixing downstream.
    """
    self.num_timesteps_cond = default(num_timesteps_cond, 1)
    self.scale_by_std = scale_by_std
    assert self.num_timesteps_cond <= kwargs['timesteps']
    # for backwards compatibility after implementation of DiffusionWrapper
    if conditioning_key is None:
        conditioning_key = 'concat' if concat_mode else 'crossattn'
    if cond_stage_config == '__is_unconditional__':
        conditioning_key = None
    ckpt_path = kwargs.pop("ckpt_path", None)
    ignore_keys = kwargs.pop("ignore_keys", [])
    super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
    self.concat_mode = concat_mode
    self.cond_stage_trainable = cond_stage_trainable
    self.cond_stage_key = cond_stage_key
    try:
        self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Intent (configs without ddconfig) is preserved.
        self.num_downs = 0
    if not scale_by_std:
        self.scale_factor = scale_factor
        self.scale_shift = scale_shift
    else:
        # buffer so the std-derived scale is checkpointed
        self.register_buffer('scale_factor', torch.tensor(scale_factor))
    self.instantiate_first_stage(first_stage_config)
    # self.instantiate_cond_stage(cond_stage_config)
    self.cond_stage_forward = cond_stage_forward
    self.clip_denoised = False
    self.bbox_tokenizer = None

    self.restarted_from_ckpt = False
    if ckpt_path is not None:
        self.init_from_ckpt(ckpt_path, ignore_keys)
        self.restarted_from_ckpt = True

    self.use_3daware = use_3daware

    self.is_test = False

    self.test_mode = None
    self.test_tag = ""
521
+
522
def make_cond_schedule(self, ):
    """Build ``self.cond_ids``: a per-timestep map into a shortened
    conditioning schedule. The first ``num_timesteps_cond`` slots get
    evenly spaced timesteps; all later slots map to the last timestep."""
    schedule = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
    spaced = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
    schedule[:self.num_timesteps_cond] = spaced
    self.cond_ids = schedule
526
+
527
@rank_zero_only
@torch.no_grad()
def on_train_batch_start(self, batch, batch_idx):
    """On the very first batch of a fresh run, rescale the latent space so
    its std is 1 by overwriting the ``scale_factor`` buffer."""
    # only for very first batch
    if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
        assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
        # set rescale weight to 1./std of encodings
        print("### USING STD-RESCALING ###")
        x = super().get_input(batch, self.first_stage_key)
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()
        # replace the buffer registered in __init__ with the data-driven value
        del self.scale_factor
        self.register_buffer('scale_factor', 1. / z.flatten().std())
        print(f"setting self.scale_factor to {self.scale_factor}")
        print("### USING STD-RESCALING ###")
543
+
544
def register_schedule(self,
                      given_betas=None, beta_schedule="linear", timesteps=1000,
                      linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, shift_scale=None):
    """Register the diffusion schedule (delegated to the base class), then
    optionally build the shortened conditioning schedule."""
    super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s, shift_scale)

    self.shorten_cond_schedule = self.num_timesteps_cond > 1
    if self.shorten_cond_schedule:
        self.make_cond_schedule()
552
+
553
def instantiate_first_stage(self, config):
    """Build the first-stage autoencoder frozen in eval mode: gradients are
    disabled and ``.train()`` is neutralized so Lightning cannot flip it
    back into training mode."""
    autoencoder = instantiate_from_config(config).eval()
    autoencoder.train = disabled_train
    for p in autoencoder.parameters():
        p.requires_grad = False
    self.first_stage_model = autoencoder
559
+
560
def instantiate_cond_stage(self, config):
    """Build the conditioning encoder.

    Handles the sentinels '__is_first_stage__' (reuse the autoencoder) and
    '__is_unconditional__' (no conditioning model); otherwise instantiates
    the configured model, frozen unless ``cond_stage_trainable``.
    """
    if not self.cond_stage_trainable:
        if config == "__is_first_stage__":
            print("Using first stage also as cond stage.")
            self.cond_stage_model = self.first_stage_model
        elif config == "__is_unconditional__":
            print(f"Training {self.__class__.__name__} as an unconditional model.")
            self.cond_stage_model = None
            # self.be_unconditional = True
        else:
            model = instantiate_from_config(config)
            # frozen: eval mode, train() disabled, no gradients
            self.cond_stage_model = model.eval()
            self.cond_stage_model.train = disabled_train
            for param in self.cond_stage_model.parameters():
                param.requires_grad = False
    else:
        assert config != '__is_first_stage__'
        assert config != '__is_unconditional__'
        model = instantiate_from_config(config)
        self.cond_stage_model = model
580
+
581
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
    """Decode a list of latents through the first stage and arrange the
    decoded images into a single grid (one column per denoising step)."""
    denoise_row = []
    for zd in tqdm(samples, desc=desc):
        denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                   force_not_quantize=force_no_decoder_quantization))
    n_imgs_per_row = len(denoise_row)
    denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
    denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
    denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
    denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
    return denoise_grid
592
+
593
def get_first_stage_encoding(self, encoder_posterior):
    """Convert the first-stage encoder output into a normalized latent:
    samples from a diagonal Gaussian posterior, or uses a raw tensor as-is,
    then applies z' = scale_factor * (z + scale_shift)."""
    if isinstance(encoder_posterior, DiagonalGaussianDistribution):
        latent = encoder_posterior.sample()
    elif isinstance(encoder_posterior, torch.Tensor):
        latent = encoder_posterior
    else:
        raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
    shifted = latent + self.scale_shift
    return self.scale_factor * shifted
601
+
602
def get_learned_conditioning(self, c):
    """Run the raw conditioning input through the conditioning encoder.

    Uses ``cond_stage_model.encode`` when available (taking the mode of a
    Gaussian posterior), plain forward otherwise, or a custom method named
    by ``self.cond_stage_forward``.
    """
    if self.cond_stage_forward is None:
        if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
            c = self.cond_stage_model.encode(c)
            if isinstance(c, DiagonalGaussianDistribution):
                c = c.mode()
        else:
            c = self.cond_stage_model(c).float()
    else:
        assert hasattr(self.cond_stage_model, self.cond_stage_forward)
        c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
    return c
614
+
615
def meshgrid(self, h, w):
    """Return an (h, w, 2) integer tensor whose entry [i, j] is (i, j)."""
    rows = torch.arange(0, h).unsqueeze(1).expand(h, w)
    cols = torch.arange(0, w).unsqueeze(0).expand(h, w)
    return torch.stack((rows, cols), dim=-1)
621
+
622
def delta_border(self, h, w):
    """Normalized distance of every pixel to the nearest image border:
    0.0 on the border, 0.5 at the image center.

    :param h: height
    :param w: width
    :return: (h, w) tensor of border distances.
    """
    # per-pixel (row, col) coordinates normalized so the lower-right
    # corner maps to (1, 1) — the meshgrid helper inlined
    corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
    rows = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
    cols = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
    grid = torch.cat([rows, cols], dim=-1) / corner
    # distance to the top/left borders and to the bottom/right borders
    near_low = torch.min(grid, dim=-1, keepdims=True)[0]
    near_high = torch.min(1 - grid, dim=-1, keepdims=True)[0]
    return torch.min(torch.cat([near_low, near_high], dim=-1), dim=-1)[0]
635
+
636
def get_weighting(self, h, w, Ly, Lx, device):
    """Blending weights for stitching overlapping patches: border pixels get
    low weight, center pixels high, clipped to the configured range; an
    optional tie-breaker reweights by patch position in the Ly x Lx grid."""
    weighting = self.delta_border(h, w)
    weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                           self.split_input_params["clip_max_weight"], )
    # one weight map per patch location
    weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

    if self.split_input_params["tie_braker"]:
        L_weighting = self.delta_border(Ly, Lx)
        L_weighting = torch.clip(L_weighting,
                                 self.split_input_params["clip_min_tie_weight"],
                                 self.split_input_params["clip_max_tie_weight"])

        L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
        weighting = weighting * L_weighting
    return weighting
651
+
652
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
    """
    :param x: img of size (bs, c, h, w)
    :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])

    Builds (fold, unfold, normalization, weighting) for patch-wise
    processing. ``uf`` upsamples the fold output, ``df`` downsamples it;
    the weighting/normalization pair blends overlapping patches.
    """
    bs, nc, h, w = x.shape

    # number of crops in image
    Ly = (h - kernel_size[0]) // stride[0] + 1
    Lx = (w - kernel_size[1]) // stride[1] + 1

    if uf == 1 and df == 1:
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

        weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

    elif uf > 1 and df == 1:
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        # NOTE(review): kernel_size[0] is used for both dims here (and in the
        # df branch below) — assumes square kernels; confirm before non-square use.
        fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                            dilation=1, padding=0,
                            stride=(stride[0] * uf, stride[1] * uf))
        fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

        weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

    elif df > 1 and uf == 1:
        fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
        unfold = torch.nn.Unfold(**fold_params)

        fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                            dilation=1, padding=0,
                            stride=(stride[0] // df, stride[1] // df))
        fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

        weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
        weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

    else:
        raise NotImplementedError

    return fold, unfold, normalization, weighting
703
+
704
@torch.no_grad()
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
              cond_key=None, return_original_cond=False, bs=None):
    """Fetch the training latent and its conditioning from a batch.

    NOTE(review): unlike upstream latent-diffusion, the batch entry is
    treated as an already-encoded latent — it is only normalized
    (z' = scale_factor * (z + scale_shift)), never passed through
    encode_first_stage. ``force_c_encode`` is accepted but unused here.

    :return: [z, c] plus optionally [x, xrec] and the raw conditioning xc.
    """
    x = super().get_input(batch, k)
    if bs is not None:
        x = x[:bs]
    z = x.to(self.device)
    z = self.scale_factor * (z + self.scale_shift)
    if self.model.conditioning_key is not None:
        if cond_key is None:
            cond_key = self.cond_stage_key
        if cond_key != self.first_stage_key:
            if cond_key in ['caption', 'coordinates_bbox']:
                xc = batch[cond_key]
            elif cond_key == 'class_label':
                xc = batch
            else:
                xc = super().get_input(batch, cond_key).to(self.device)
        else:
            xc = x
        # conditioning is passed through raw; encoding happens in forward()
        c = xc
        if bs is not None:
            c = c[:bs]

        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            ckey = __conditioning_keys__[self.model.conditioning_key]
            c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

    else:
        c = None
        xc = None
        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            c = {'pos_x': pos_x, 'pos_y': pos_y}
    out = [z, c]
    if return_first_stage_outputs:
        xrec = self.decode_first_stage(z)
        out.extend([x, xrec])
    if return_original_cond:
        out.append(xc)

    return out
753
+
754
@torch.no_grad()
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
    """Decode a normalized latent back through the frozen first stage.

    NOTE(review): ``predict_cids`` and ``force_not_quantize`` are accepted
    for interface compatibility but ignored here — the upstream patched/VQ
    decoding paths were removed in this fork.
    """
    # undo the normalization applied in get_input / get_first_stage_encoding
    z = 1. / self.scale_factor * z - self.scale_shift

    # unrollout=True: decoder-side flag of the triplane autoencoder —
    # presumably undoes the plane rollout done by encode(rollout=True);
    # TODO confirm against the first-stage model.
    return self.first_stage_model.decode(z, unrollout=True)
822
+
823
# same as above but without decorator
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
    """Gradient-carrying decode of a latent through the first stage,
    optionally patch-wise when ``split_input_params`` is set.

    NOTE(review): unlike decode_first_stage above, this variant still uses
    the upstream patched/VQ code paths and calls
    ``first_stage_model.decode(z)`` without ``unrollout`` — confirm the two
    are meant to diverge.
    """
    if predict_cids:
        # z holds codebook logits: convert to indices, then to embeddings
        if z.dim() == 4:
            z = torch.argmax(z.exp(), dim=1).long()
        z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
        z = rearrange(z, 'b h w c -> b c h w').contiguous()

    # undo the latent normalization
    z = 1. / self.scale_factor * z - self.scale_shift

    if hasattr(self, "split_input_params"):
        if self.split_input_params["patch_distributed_vq"]:
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)
            uf = self.split_input_params["vqf"]
            bs, nc, h, w = z.shape
            # shrink kernel/stride when the latent is smaller than configured
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                print("reducing Kernel")

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                print("reducing stride")

            fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

            z = unfold(z)  # (bn, nc * prod(**ks), L)
            # 1. Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

            # 2. apply model loop over last dim
            if isinstance(self.first_stage_model, VQModelInterface):
                output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                             force_not_quantize=predict_cids or force_not_quantize)
                               for i in range(z.shape[-1])]
            else:

                output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

            o = torch.stack(output_list, axis=-1)  # # (bn, nc, ks[0], ks[1], L)
            o = o * weighting
            # Reverse 1. reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            decoded = fold(o)
            decoded = decoded / normalization  # norm is shape (1, 1, h, w)
            return decoded
        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    else:
        if isinstance(self.first_stage_model, VQModelInterface):
            return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
        else:
            return self.first_stage_model.decode(z)
882
+
883
@torch.no_grad()
def encode_first_stage(self, x):
    """Encode an input through the frozen first stage.

    NOTE(review): the upstream patched (split_input_params) encoding path
    was removed in this fork; encoding is always a single call.
    rollout=True: encoder-side flag of the triplane autoencoder —
    presumably lays the three planes out side by side; TODO confirm.
    """
    return self.first_stage_model.encode(x, rollout=True)
922
+
923
def shared_step(self, batch, **kwargs):
    """Common train/val step: fetch (latent, conditioning) and compute the
    conditional diffusion loss."""
    latents, conditioning = self.get_input(batch, self.first_stage_key)
    return self(latents, conditioning)
927
+
928
def forward(self, x, cond=None, return_inter=False, *args, **kwargs):
    """Sample random timesteps, prepare the conditioning, and compute the
    latent-diffusion training loss.

    :param cond: raw conditioning; encoded here when the cond stage is
        trainable, otherwise passed through as-is.
    :param return_inter: forwarded to p_losses to also return intermediates.
    """
    t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
    if self.model.conditioning_key is not None:
        assert cond is not None
        if self.cond_stage_trainable:
            cond = self.get_learned_conditioning(cond)
        if self.shorten_cond_schedule:  # TODO: drop this option
            # diffuse the conditioning itself to its mapped timestep
            tc = self.cond_ids[t].to(self.device)
            cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond.float()))
    return self.p_losses(x, cond, t, return_inter=return_inter, *args, **kwargs)
940
+
941
def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
    """Rescale (x0, y0, w, h) bounding boxes from full-image coordinates
    into coordinates relative to a crop, clamping to the crop bounds."""
    def rescale_bbox(bbox):
        x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
        y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
        # width/height are capped so the box never extends past the crop edge
        w = min(bbox[2] / crop_coordinates[2], 1 - x0)
        h = min(bbox[3] / crop_coordinates[3], 1 - y0)
        return x0, y0, w, h

    return [rescale_bbox(b) for b in bboxes]
950
+
951
def to3daware(self, triplane):
    """Augment each triplane sub-plane with axis-averaged features from the
    other two planes (tripling the channel count).

    :param triplane: (B, C, res, 3*res) tensor with three res x res planes
        concatenated along the width.
    :return: (B, 3*C, res, 3*res) contiguous tensor.
    """
    res = triplane.shape[-2]
    p1, p2, p3 = (triplane[..., i * res:(i + 1) * res] for i in range(3))

    pool_h = torch.nn.AvgPool2d((res, 1))
    pool_w = torch.nn.AvgPool2d((1, res))

    def h_avg(plane):
        # collapse H by averaging, broadcast back, then swap the spatial axes
        return pool_h(plane).repeat(1, 1, res, 1).permute(0, 1, 3, 2)

    def w_avg(plane):
        # collapse W by averaging, broadcast back, then swap the spatial axes
        return pool_w(plane).repeat(1, 1, 1, res).permute(0, 1, 3, 2)

    aug1 = torch.cat([p1, h_avg(p2), torch.flip(w_avg(p3), (3,))], 1)
    aug2 = torch.cat([p2, w_avg(p1), h_avg(p3)], 1)
    aug3 = torch.cat([p3, torch.flip(h_avg(p1), (2,)), w_avg(p2)], 1)

    return torch.cat([aug1, aug2, aug3], -1).contiguous()
976
+
977
+ # B, C, H, W = h.shape
978
+ # h_xy = th.cat([h[..., 0:(W//3)], h[..., (W//3):(2*W//3)].mean(-1).unsqueeze(-1).repeat(1, 1, 1, W//3), h[..., (2*W//3):W].mean(-2).unsqueeze(-2).repeat(1, 1, H, 1)], 1)
979
+ # h_xz = th.cat([h[..., (W//3):(2*W//3)], h[..., 0:(W//3)].mean(-1).unsqueeze(-1).repeat(1, 1, 1, W//3), h[..., (2*W//3):W].mean(-1).unsqueeze(-1).repeat(1, 1, 1, W//3)], 1)
980
+ # h_zy = th.cat([h[..., (2*W//3):W], h[..., 0:(W//3)].mean(-2).unsqueeze(-2).repeat(1, 1, H, 1), h[..., (W//3):(2*W//3)].mean(-2).unsqueeze(-2).repeat(1, 1, H, 1)], 1)
981
+ # h = th.cat([h_xy, h_xz, h_zy], -1)
982
+
983
    def apply_model(self, x_noisy, t, cond, return_ids=False):
        """Run the denoiser on `x_noisy` at timesteps `t` with conditioning `cond`.

        `cond` is normalized into a dict keyed by the wrapper's conditioning
        mode ('c_concat' or 'c_crossattn').  When `split_input_params` is set,
        the input is evaluated patch-wise (unfold -> per-patch model call ->
        weighted fold); otherwise a single forward pass is done, optionally
        after 3D-aware triplane augmentation (`self.use_3daware`).
        """
        #ipdb.set_trace()
        if isinstance(cond, dict):
            # hybrid case, cond is exptected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            # Route the raw conditioning under the key matching the wrapper's mode.
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            # --- patch-wise (sliding-window) evaluation path ---
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)

            h, w = x_noisy.shape[-2:]

            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                # Image-like conditioning: split it into the same patches as the input.
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                # Bbox conditioning: re-tokenize crop coordinates per patch.
                assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** (num_downs)

                # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                # Conditioning is not spatial: reuse it for every patch.
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo cant deal with multiple model outputs check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            if self.use_3daware:
                # Augment the triplane with cross-plane pooled features first.
                x_noisy_3daware = self.to3daware(x_noisy)
                x_recon = self.model(x_noisy_3daware, t, **cond)
            else:
                x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon
1089
+
1090
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
1091
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
1092
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
1093
+
1094
+ def _prior_bpd(self, x_start):
1095
+ """
1096
+ Get the prior KL term for the variational lower-bound, measured in
1097
+ bits-per-dim.
1098
+ This term can't be optimized, as it only depends on the encoder.
1099
+ :param x_start: the [N x C x ...] tensor of inputs.
1100
+ :return: a batch of [N] KL values (in bits), one per batch element.
1101
+ """
1102
+ batch_size = x_start.shape[0]
1103
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
1104
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
1105
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
1106
+ return mean_flat(kl_prior) / np.log(2.0)
1107
+
1108
    def p_losses(self, x_start, cond, t, noise=None, return_inter=False):
        """Compute the diffusion training loss at the given timesteps `t`.

        The regression target depends on `self.parameterization` (x0 / eps / v).
        Returns (loss, loss_dict) and additionally the predicted x_0 when
        `return_inter` is set.
        """
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        # Pick the regression target for the chosen parameterization.
        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        # Per-timestep (optionally learned) log-variance weighting of the loss.
        logvar_t = self.logvar[t.to(self.logvar.device)].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        # VLB term, weighted per-timestep by lvlb_weights.
        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        if return_inter:
            # NOTE(review): the intermediate uses predict_start_from_noise even
            # for non-"eps" parameterizations — confirm intended for "x0"/"v".
            return loss, loss_dict, self.predict_start_from_noise(x_noisy, t=t, noise=model_output)
        else:
            return loss, loss_dict
1147
+
1148
    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        """Compute the Gaussian parameters of p(x_{t-1} | x_t, c).

        Optionally clips or VQ-quantizes the predicted x_0 and can return it
        (or codebook logits) alongside the posterior mean/variance.
        """
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            # Score correction is only defined for the eps parameterization.
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        # Recover the predicted clean sample x_0 under the active parameterization.
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        elif self.parameterization == "v":
            x_recon = self.predict_start_from_z_and_v(x, t, model_out)
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance
1180
+
1181
    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        """Draw one ancestral sampling step x_{t-1} ~ p(x_{t-1} | x_t, c)."""
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            # NOTE(review): the unpack below is unreachable after the raise.
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1211
+
1212
    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        """Run the full reverse process, collecting intermediate x_0 predictions.

        Returns (final_img, intermediates) where intermediates are the model's
        x_0 estimates logged every `log_every_t` steps.  Supports masked
        inpainting via `mask`/`x0` and a per-step `temperature` list.
        """
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            # Truncate conditioning to the sampled batch size (dict / list / tensor).
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            # Broadcast a scalar temperature over all timesteps.
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                # Noise the conditioning to the matching (shortened) schedule step.
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                # Inpainting: keep the known region from the (noised) reference x0.
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates
1267
+
1268
    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):
        """Full ancestral sampling loop from pure noise (or `x_T`) down to x_0.

        Logs intermediate `img` states every `log_every_t` steps and supports
        masked inpainting via `mask`/`x0`.
        """

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                # Noise the conditioning to the matching (shortened) schedule step.
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            # if self.is_test and i % 50 == 0:
            #     decode_res = self.decode_first_stage(img)
            #     rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
            #         decode_res, self.batch_rays, self.batch_img,
            #     )
            #     rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
            #     imageio.imwrite(os.path.join(self.logger.log_dir, "sample_process_{}.png".format(i)), rgb_sample)
            #     colorize_res = self.first_stage_model.to_rgb(img)
            #     imageio.imwrite(os.path.join(self.logger.log_dir, "sample_process_latent_{}.png".format(i)), colorize_res[0])

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                # Inpainting: keep the known region from the (noised) reference x0.
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img
1328
+
1329
+ @torch.no_grad()
1330
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1331
+ verbose=True, timesteps=None, quantize_denoised=False,
1332
+ mask=None, x0=None, shape=None,**kwargs):
1333
+ if shape is None:
1334
+ shape = (batch_size, self.channels, self.image_size, self.image_size * 3)
1335
+ if cond is not None:
1336
+ if isinstance(cond, dict):
1337
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1338
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1339
+ else:
1340
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1341
+ return self.p_sample_loop(cond,
1342
+ shape,
1343
+ return_intermediates=return_intermediates, x_T=x_T,
1344
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1345
+ mask=mask, x0=x0)
1346
+
1347
    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        """Sample via DDIM when `ddim` else ancestral sampling.

        Returns (samples, intermediates).

        NOTE(review): the DDIM branch builds a square shape
        (channels, image_size, image_size) while `sample()` defaults to a
        triplane width of image_size * 3 — confirm the DDIM path is intended
        for square latents only.
        """

        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates
1361
+
1362
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Validation: log EMA and non-EMA losses; for the first two batches,
        additionally sample a triplane, render it alongside the ground truth
        and the one-step denoised latent, and log the visuals to wandb."""
        # x, c = self.get_input(batch, self.first_stage_key)
        # self.batch_rays = batch['batch_rays'][0][1:2]
        # self.batch_img = batch['img'][0][1:2]
        # self.is_test = True
        # self.test_schedule(x[0:1])
        # exit(0)

        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            # _, loss_dict_ema = self.shared_step(batch)
            x, c = self.get_input(batch, self.first_stage_key)
            _, loss_dict_ema, inter_res = self(x, c, return_inter=True)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True, sync_dist=True)

        if batch_idx < 2:
            if self.num_timesteps < 1000:
                # With a shortened schedule, start sampling from a matched noised input.
                x_T = self.q_sample(x_start=x[0:1], t=torch.full((1,), self.num_timesteps-1, device=x.device, dtype=torch.long), noise=torch.randn_like(x[0:1]))
                print("Specifying x_T when sampling!")
            else:
                x_T = None
            with self.ema_scope():
                res = self.sample(c, 1, shape=x[0:1].shape, x_T = x_T)
                decode_res = self.decode_first_stage(res)
                decode_input = self.decode_first_stage(x[:1])
                decode_output = self.decode_first_stage(inter_res[:1])

            colorize_res = self.first_stage_model.to_rgb(res)[0]
            colorize_x = self.first_stage_model.to_rgb(x[:1])[0]
            # imageio.imwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.png".format(batch_idx, 0)), colorize_res[0])
            # imageio.imwrite(os.path.join(self.logger.log_dir, "gt_{}_{}.png".format(batch_idx, 0)), colorize_x[0])

            # Render the decoded triplanes with the EG3D decoder for visual comparison.
            rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_res, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_input, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_input, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_output, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                decode_output, batch['batch_rays'][0], batch['img'][0],
            )
            rgb_sample = to8b(rgb_sample.detach().cpu().numpy())
            rgb_input = to8b(rgb_input.detach().cpu().numpy())
            rgb_output = to8b(rgb_output.detach().cpu().numpy())

            # Concatenate sample / input / one-step output side by side
            # (second view when more than one view is available).
            if rgb_sample.shape[0] == 1:
                rgb_all = np.concatenate([rgb_sample[0], rgb_input[0], rgb_output[0]], 1)
            else:
                rgb_all = np.concatenate([rgb_sample[1], rgb_input[1], rgb_output[1]], 1)


            if self.model.conditioning_key is not None:
                if self.cond_stage_key == 'img_cond':
                    # Append the conditioning image next to the renders.
                    cond_img = super().get_input(batch, self.cond_stage_key)[0].permute(1, 2, 0)
                    rgb_all = np.concatenate([rgb_all, to8b(cond_img.cpu().numpy())], 1)
                elif 'caption' in self.cond_stage_key:
                    # Overlay the caption text, wrapped every 30 characters.
                    import cv2
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    # org
                    org = (50, 50)
                    # fontScale
                    fontScale = 1
                    # Blue color in BGR
                    color = (255, 0, 0)
                    # Line thickness of 2 px
                    thickness = 2
                    caption = super().get_input(batch, 'caption')[0]
                    break_caption = []
                    for i in range(len(caption) // 30 + 1):
                        break_caption_i = caption[i*30:(i+1)*30]
                        break_caption.append(break_caption_i)
                    for i, bci in enumerate(break_caption):
                        cv2.putText(rgb_all, bci, (50, 50*(i+1)), font, fontScale, color, thickness, cv2.LINE_AA)

            self.logger.experiment.log({
                "val/vis": [wandb.Image(rgb_all)],
                "val/colorize_rse": [wandb.Image(colorize_res)],
                "val/colorize_x": [wandb.Image(colorize_x)],
            })
1444
+
1445
    @torch.no_grad()
    def test_schedule(self, x_start, freq=50):
        """Visualize the forward noising schedule: every `freq` timesteps,
        render the noised latent (decode + EG3D render) and its colorized
        latent, then save both strips annotated with schedule hyperparameters.

        Relies on `self.batch_rays` / `self.batch_img` being set by the caller.
        """
        noise = torch.randn_like(x_start)
        img_list = []
        latent_list = []
        for t in tqdm(range(self.num_timesteps)):
            if t % freq == 0:
                t_long = torch.Tensor([t,]).long().to(x_start.device)
                x_noisy = self.q_sample(x_start=x_start, t=t_long, noise=noise)
                decode_res = self.decode_first_stage(x_noisy)
                rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                    decode_res, self.batch_rays, self.batch_img,
                )
                rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
                # imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_{}.png".format(t)), rgb_sample)
                colorize_res = self.first_stage_model.to_rgb(x_noisy)
                # imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_latent_{}.png".format(t)), colorize_res[0])
                img_list.append(rgb_sample)
                latent_list.append(colorize_res[0])
        imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_{}_{}_{}_{}.png".format(self.linear_start, self.linear_end, self.beta_schedule, self.scale_factor)), np.concatenate(img_list, 1))
        imageio.imwrite(os.path.join(self.logger.log_dir, "add_noise_latent_{}_{}_{}_{}.png".format(self.linear_start, self.linear_end, self.beta_schedule, self.scale_factor)), np.concatenate(latent_list, 1))
1466
+
1467
    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        """Test-time entry point with three modes (`self.test_mode`):
        'fid' renders sampled triplanes from all camera rays into a folder;
        'sample' saves colorized latents plus per-sample turntable videos;
        'noise_schedule' visualizes the forward schedule and exits."""
        x, c = self.get_input(batch, self.first_stage_key)
        if self.test_mode == 'fid':
            bs = x.shape[0]
        else:
            bs = 1
        if self.test_mode == 'noise_schedule':
            # Pick one fixed view for the schedule visualization, then exit.
            self.batch_rays = batch['batch_rays'][0][33:34]
            self.batch_img = batch['img'][0][33:34]
            self.is_test = True
            self.test_schedule(x)
            exit(0)
        with self.ema_scope():
            if c is not None:
                res = self.sample(c[:bs], bs, shape=x[0:bs].shape)
            else:
                res = self.sample(None, bs, shape=x[0:bs].shape)
            decode_res = self.decode_first_stage(res)
            if self.test_mode == 'fid':
                # Render every sample from all provided camera rays and dump PNGs.
                folder = os.path.join(self.logger.log_dir, 'FID_' + self.test_tag)
                if not os.path.exists(folder):
                    os.makedirs(folder, exist_ok=True)
                rgb_sample_list = []
                for b in range(bs):
                    rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                        decode_res[b:b+1], batch['batch_rays'][b], batch['img'][b],
                    )
                    rgb_sample = to8b(rgb_sample.detach().cpu().numpy())
                    rgb_sample_list.append(rgb_sample)
                for i in range(len(rgb_sample_list)):
                    for v in range(rgb_sample_list[i].shape[0]):
                        imageio.imwrite(os.path.join(folder, "sample_{}_{}_{}.png".format(batch_idx, i, v)), rgb_sample_list[i][v])
            elif self.test_mode == 'sample':
                colorize_res = self.first_stage_model.to_rgb(res)
                colorize_x = self.first_stage_model.to_rgb(x[:1])
                imageio.imwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.png".format(batch_idx, 0)), colorize_res[0])
                imageio.imwrite(os.path.join(self.logger.log_dir, "gt_{}_{}.png".format(batch_idx, 0)), colorize_x[0])
                if self.model.conditioning_key is not None:
                    cond_img = super().get_input(batch, self.cond_stage_key)[0].permute(1, 2, 0)
                    cond_img = to8b(cond_img.cpu().numpy())
                    imageio.imwrite(os.path.join(self.logger.log_dir, "cond_{}_{}.png".format(batch_idx, 0)), cond_img)
                for b in range(bs):
                    # Render a turntable video view-by-view.
                    video = []
                    for v in tqdm(range(batch['batch_rays'].shape[1])):
                        rgb_sample, _ = self.first_stage_model.render_triplane_eg3d_decoder(
                            decode_res[b:b+1], batch['batch_rays'][0][v:v+1], batch['img'][0][v:v+1],
                        )
                        rgb_sample = to8b(rgb_sample.detach().cpu().numpy())[0]
                        video.append(rgb_sample)
                    imageio.mimwrite(os.path.join(self.logger.log_dir, "sample_{}_{}.mp4".format(batch_idx, b)), video, fps=24)
                    print("Saving to {}".format(os.path.join(self.logger.log_dir, "sample_{}_{}.mp4".format(batch_idx, b))))
1519
+
1520
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):
        """Assemble a dict of visualization tensors for logging: inputs,
        reconstructions, conditioning, forward-diffusion rows, samples,
        quantized samples, inpainting/outpainting, and progressive rows."""

        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            # Render the conditioning in a loggable form (decoded, text, or image).
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
                                                         ddim_steps=ddim_steps,eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with self.ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
                                                             ddim_steps=ddim_steps,eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

            if inpaint:
                # make a simple center square
                b, h, w = z.shape[0], z.shape[2], z.shape[3]
                mask = torch.ones(N, h, w).to(self.device)
                # zeros will be filled in
                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
                mask = mask[:, None, ...]
                with self.ema_scope("Plotting Inpaint"):

                    samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_inpainting"] = x_samples
                log["mask"] = mask

                # outpaint
                with self.ema_scope("Plotting Outpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with self.ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
1630
+
1631
+ def configure_optimizers(self):
1632
+ lr = self.learning_rate
1633
+ params = list(self.model.parameters())
1634
+ if self.cond_stage_trainable:
1635
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1636
+ params = params + list(self.cond_stage_model.parameters())
1637
+ if self.learn_logvar:
1638
+ print('Diffusion model optimizing logvar')
1639
+ params.append(self.logvar)
1640
+ opt = torch.optim.AdamW(params, lr=lr)
1641
+ if self.use_scheduler:
1642
+ assert 'target' in self.scheduler_config
1643
+ scheduler = instantiate_from_config(self.scheduler_config)
1644
+
1645
+ print("Setting up LambdaLR scheduler...")
1646
+ scheduler = [
1647
+ {
1648
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1649
+ 'interval': 'step',
1650
+ 'frequency': 1
1651
+ }]
1652
+ return [opt], scheduler
1653
+ return opt
1654
+
1655
+ @torch.no_grad()
1656
+ def to_rgb(self, x):
1657
+ x = x.float()
1658
+ if not hasattr(self, "colorize"):
1659
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1660
+ x = nn.functional.conv2d(x, weight=self.colorize)
1661
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1662
+ return x
1663
+
1664
+
1665
+
1666
class DiffusionWrapper(pl.LightningModule):
    """Thin wrapper routing conditioning tensors into the inner diffusion model.

    `conditioning_key` selects how conditioning is fed:
      - None:        unconditional
      - 'concat':    conditioning concatenated onto the input channels
      - 'crossattn': conditioning passed as cross-attention context
      - 'hybrid':    both concat and cross-attention
      - 'adm':       conditioning passed as label embedding `y`
    """

    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        key = self.conditioning_key
        if key is None:
            return self.diffusion_model(x, t)
        if key == 'concat':
            return self.diffusion_model(torch.cat([x] + c_concat, dim=1), t)
        if key == 'crossattn':
            return self.diffusion_model(x, t, context=torch.cat(c_crossattn, 1))
        if key == 'hybrid':
            return self.diffusion_model(torch.cat([x] + c_concat, dim=1), t,
                                        context=torch.cat(c_crossattn, 1))
        if key == 'adm':
            return self.diffusion_model(x, t, y=c_crossattn[0])
        raise NotImplementedError()
1693
+
1694
+
1695
class Layout2ImgDiffusion(LatentDiffusion):
    """Latent diffusion specialized for bounding-box layout conditioning."""
    # TODO: move all layout-specific hacks to this class

    def __init__(self, cond_stage_key, *args, **kwargs):
        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        """Extend parent logging with a rendered bounding-box conditioning image."""
        logs = super().log_images(batch=batch, N=N, *args, **kwargs)

        split = 'train' if self.training else 'validation'
        dataset = self.trainer.datamodule.datasets[split]
        mapper = dataset.conditional_builders[self.cond_stage_key]

        # Render each tokenized bbox layout to an image via the dataset's builder.
        label_for = lambda catno: dataset.get_textual_label(dataset.get_category_id(catno))
        rendered = [mapper.plot(tknzd_bbox.detach().cpu(), label_for, (256, 256))
                    for tknzd_bbox in batch[self.cond_stage_key][:N]]

        logs['bbox_image'] = torch.stack(rendered, dim=0)
        return logs
3DTopia/ldm/models/diffusion/dpm_solver/__init__.py ADDED
@@ -0,0 +1 @@
 
1
+ from .sampler import DPMSolverSampler
3DTopia/ldm/models/diffusion/dpm_solver/dpm_solver.py ADDED
@@ -0,0 +1,1184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import math
4
+
5
+
6
class NoiseScheduleVP:
    """Wrapper for the forward SDE of a VP-type diffusion model.

    The forward SDE satisfies q_{t|0}(x_t | x_0) = N(alpha_t * x_0, sigma_t^2 * I),
    and lambda_t = log(alpha_t) - log(sigma_t) is the half-logSNR used by DPM-Solver.
    This class exposes alpha_t, sigma_t, lambda_t, and the inverse of lambda as
    functions of a continuous time label t in [0, T].

    Discrete-time models (schedule='discrete', trained on n = 0, ..., N-1) are
    supported through a piecewise-linear interpolation of log(alpha_t) on the grid
    t_i = (i + 1) / N. Pass exactly one of `betas` or `alphas_cumprod`; note that
    in DDPM notation alpha_{t_n} = sqrt(alphas_cumprod[n]), i.e.
    log(alpha_{t_n}) = 0.5 * log(alphas_cumprod[n]).

    Continuous-time models use schedule='linear' (DDPM) or 'cosine' (improved DDPM)
    with the standard default hyperparameters.

    Example:
        >>> ns = NoiseScheduleVP('discrete', betas=betas)
        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
    """

    def __init__(
        self,
        schedule='discrete',
        betas=None,
        alphas_cumprod=None,
        continuous_beta_0=0.1,
        continuous_beta_1=20.,
    ):
        if schedule not in ['discrete', 'linear', 'cosine']:
            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))

        self.schedule = schedule
        if schedule == 'discrete':
            # Convert the discrete beta / alpha-bar arrays into log(alpha_t)
            # samples on the grid t_i = (i + 1) / N for later interpolation.
            if betas is not None:
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.
            self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
            self.log_alpha_array = log_alphas.reshape((1, -1,))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.
            self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
            # For the cosine schedule, T = 1 has numerical issues, so the ending
            # time is set manually. 0.9946 may not be optimal but works well.
            self.T = 0.9946 if schedule == 'cosine' else 1.

    def marginal_log_mean_coeff(self, t):
        """Compute log(alpha_t) of a given continuous-time label t in [0, T]."""
        if self.schedule == 'discrete':
            return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
        elif self.schedule == 'linear':
            return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
        elif self.schedule == 'cosine':
            log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
            return log_alpha_fn(t) - self.cosine_log_alpha_0

    def marginal_alpha(self, t):
        """Compute alpha_t of a given continuous-time label t in [0, T]."""
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """Compute sigma_t = sqrt(1 - alpha_t^2) of a given continuous-time label t in [0, T]."""
        return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        """Compute the half-logSNR lambda_t = log(alpha_t) - log(sigma_t) at time t."""
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
        return log_mean_coeff - log_std

    def inverse_lambda(self, lamb):
        """Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t."""
        if self.schedule == 'linear':
            tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            Delta = self.beta_0**2 + tmp
            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
        elif self.schedule == 'discrete':
            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
            t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
            return t.reshape((-1,))
        else:
            log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
            return t_fn(log_alpha)
175
+
176
+
177
def model_wrapper(
    model,
    noise_schedule,
    model_type="noise",
    model_kwargs=None,
    guidance_type="uncond",
    condition=None,
    unconditional_condition=None,
    guidance_scale=1.,
    classifier_fn=None,
    classifier_kwargs=None,
):
    """Wrap a diffusion model into a continuous-time noise prediction function for DPM-Solver.

    Supported `model_type` parameterizations:
        - "noise":   the model predicts epsilon.
        - "x_start": the model predicts x_0; converted via eps = (x - alpha_t * x0) / sigma_t.
        - "v":       the model predicts velocity; converted via eps = alpha_t * v + sigma_t * x.
        - "score":   the model predicts the marginal score; converted via eps = -sigma_t * score.

    Supported `guidance_type` values:
        - "uncond":          unconditional sampling; model(x, t_input, **model_kwargs).
        - "classifier":      classifier guidance; requires `classifier_fn` and `condition`.
        - "classifier-free": classifier-free guidance; model(x, t_input, cond, **model_kwargs);
          conditional and unconditional outputs are mixed with `guidance_scale`.

    Args:
        model: the diffusion model; receives discrete-style time labels
            (0 .. 1000 * (N - 1) / N) when `noise_schedule.schedule == 'discrete'`,
            otherwise the continuous time directly.
        noise_schedule: a NoiseScheduleVP-like noise schedule object.
        model_type: one of "noise", "x_start", "v", "score".
        model_kwargs: extra keyword arguments forwarded to `model` (default: {}).
        guidance_type: one of "uncond", "classifier", "classifier-free".
        condition: guidance condition (classifier / classifier-free only).
        unconditional_condition: unconditional condition (classifier-free only).
        guidance_scale: guidance strength.
        classifier_fn: classifier callable for classifier guidance;
            classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits.
        classifier_kwargs: extra keyword arguments for `classifier_fn` (default: {}).

    Returns:
        A function `model_fn(x, t_continuous) -> noise` suitable for DPM_Solver.

    Raises:
        AssertionError: if `model_type` or `guidance_type` is unsupported, or if
            `classifier_fn` is missing for classifier guidance.
    """
    # Validate eagerly so misconfiguration fails at wrap time, not at the first
    # sampling step. BUGFIX: the original assertion omitted "score" even though
    # noise_pred_fn implements it and the docstring advertises it.
    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    # Avoid mutable default arguments.
    if model_kwargs is None:
        model_kwargs = {}
    if classifier_kwargs is None:
        classifier_kwargs = {}

    def get_model_input_time(t_continuous):
        """Map continuous time in (0, 1] to the model's native time label.

        Discrete-time DPMs expect labels in [0, 1000 * (N - 1) / N]; continuous-time
        DPMs take `t_continuous` unchanged.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        # Broadcast a scalar time to the batch dimension.
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        # Convert the model output to an epsilon prediction.
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            # noise(x_t, t) = -sigma_t * score(x_t, t)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input):
        """Gradient of the classifier log-probability: nabla_x log p_t(condition | x_t)."""
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """The noise prediction model function that is used for DPM-Solver."""
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Batch the unconditional and conditional passes into one forward call.
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    return model_fn
349
+
350
+
351
+ class DPM_Solver:
352
+ def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
353
+ """Construct a DPM-Solver.
354
+
355
+ We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
356
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
357
+ If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
358
+ In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
359
+ The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
360
+
361
+ Args:
362
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
363
+ ``
364
+ def model_fn(x, t_continuous):
365
+ return noise
366
+ ``
367
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
368
+ predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
369
+ thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
370
+ max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
371
+
372
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
373
+ """
374
+ self.model = model_fn
375
+ self.noise_schedule = noise_schedule
376
+ self.predict_x0 = predict_x0
377
+ self.thresholding = thresholding
378
+ self.max_val = max_val
379
+
380
+ def noise_prediction_fn(self, x, t):
381
+ """
382
+ Return the noise prediction model.
383
+ """
384
+ return self.model(x, t)
385
+
386
+ def data_prediction_fn(self, x, t):
387
+ """
388
+ Return the data prediction model (with thresholding).
389
+ """
390
+ noise = self.noise_prediction_fn(x, t)
391
+ dims = x.dim()
392
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
393
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
394
+ if self.thresholding:
395
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
396
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
397
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
398
+ x0 = torch.clamp(x0, -s, s) / s
399
+ return x0
400
+
401
+ def model_fn(self, x, t):
402
+ """
403
+ Convert the model to the noise prediction model or the data prediction model.
404
+ """
405
+ if self.predict_x0:
406
+ return self.data_prediction_fn(x, t)
407
+ else:
408
+ return self.noise_prediction_fn(x, t)
409
+
410
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
411
+ """Compute the intermediate time steps for sampling.
412
+
413
+ Args:
414
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
415
+ - 'logSNR': uniform logSNR for the time steps.
416
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
417
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
418
+ t_T: A `float`. The starting time of the sampling (default is T).
419
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
420
+ N: A `int`. The total number of the spacing of the time steps.
421
+ device: A torch device.
422
+ Returns:
423
+ A pytorch tensor of the time steps, with the shape (N + 1,).
424
+ """
425
+ if skip_type == 'logSNR':
426
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
427
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
428
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
429
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
430
+ elif skip_type == 'time_uniform':
431
+ return torch.linspace(t_T, t_0, N + 1).to(device)
432
+ elif skip_type == 'time_quadratic':
433
+ t_order = 2
434
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
435
+ return t
436
+ else:
437
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
438
+
439
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
440
+ """
441
+ Get the order of each step for sampling by the singlestep DPM-Solver.
442
+
443
+ We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
444
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
445
+ - If order == 1:
446
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
447
+ - If order == 2:
448
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
449
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
450
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
451
+ - If order == 3:
452
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
453
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
454
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
455
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
456
+
457
+ ============================================
458
+ Args:
459
+ order: A `int`. The max order for the solver (2 or 3).
460
+ steps: A `int`. The total number of function evaluations (NFE).
461
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
462
+ - 'logSNR': uniform logSNR for the time steps.
463
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
464
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
465
+ t_T: A `float`. The starting time of the sampling (default is T).
466
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
467
+ device: A torch device.
468
+ Returns:
469
+ orders: A list of the solver order of each step.
470
+ """
471
+ if order == 3:
472
+ K = steps // 3 + 1
473
+ if steps % 3 == 0:
474
+ orders = [3,] * (K - 2) + [2, 1]
475
+ elif steps % 3 == 1:
476
+ orders = [3,] * (K - 1) + [1]
477
+ else:
478
+ orders = [3,] * (K - 1) + [2]
479
+ elif order == 2:
480
+ if steps % 2 == 0:
481
+ K = steps // 2
482
+ orders = [2,] * K
483
+ else:
484
+ K = steps // 2 + 1
485
+ orders = [2,] * (K - 1) + [1]
486
+ elif order == 1:
487
+ K = 1
488
+ orders = [1,] * steps
489
+ else:
490
+ raise ValueError("'order' must be '1' or '2' or '3'.")
491
+ if skip_type == 'logSNR':
492
+ # To reproduce the results in DPM-Solver paper
493
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
494
+ else:
495
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)]
496
+ return timesteps_outer, orders
497
+
498
+ def denoise_to_zero_fn(self, x, s):
499
+ """
500
+ Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
501
+ """
502
+ return self.data_prediction_fn(x, s)
503
+
504
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
505
+ """
506
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
507
+
508
+ Args:
509
+ x: A pytorch tensor. The initial value at time `s`.
510
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
511
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
512
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
513
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
514
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
515
+ Returns:
516
+ x_t: A pytorch tensor. The approximated solution at time `t`.
517
+ """
518
+ ns = self.noise_schedule
519
+ dims = x.dim()
520
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
521
+ h = lambda_t - lambda_s
522
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
523
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
524
+ alpha_t = torch.exp(log_alpha_t)
525
+
526
+ if self.predict_x0:
527
+ phi_1 = torch.expm1(-h)
528
+ if model_s is None:
529
+ model_s = self.model_fn(x, s)
530
+ x_t = (
531
+ expand_dims(sigma_t / sigma_s, dims) * x
532
+ - expand_dims(alpha_t * phi_1, dims) * model_s
533
+ )
534
+ if return_intermediate:
535
+ return x_t, {'model_s': model_s}
536
+ else:
537
+ return x_t
538
+ else:
539
+ phi_1 = torch.expm1(h)
540
+ if model_s is None:
541
+ model_s = self.model_fn(x, s)
542
+ x_t = (
543
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
544
+ - expand_dims(sigma_t * phi_1, dims) * model_s
545
+ )
546
+ if return_intermediate:
547
+ return x_t, {'model_s': model_s}
548
+ else:
549
+ return x_t
550
+
551
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'):
552
+ """
553
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
554
+
555
+ Args:
556
+ x: A pytorch tensor. The initial value at time `s`.
557
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
558
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
559
+ r1: A `float`. The hyperparameter of the second-order solver.
560
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
561
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
562
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
563
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
564
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
565
+ Returns:
566
+ x_t: A pytorch tensor. The approximated solution at time `t`.
567
+ """
568
+ if solver_type not in ['dpm_solver', 'taylor']:
569
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
570
+ if r1 is None:
571
+ r1 = 0.5
572
+ ns = self.noise_schedule
573
+ dims = x.dim()
574
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
575
+ h = lambda_t - lambda_s
576
+ lambda_s1 = lambda_s + r1 * h
577
+ s1 = ns.inverse_lambda(lambda_s1)
578
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)
579
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
580
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
581
+
582
+ if self.predict_x0:
583
+ phi_11 = torch.expm1(-r1 * h)
584
+ phi_1 = torch.expm1(-h)
585
+
586
+ if model_s is None:
587
+ model_s = self.model_fn(x, s)
588
+ x_s1 = (
589
+ expand_dims(sigma_s1 / sigma_s, dims) * x
590
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
591
+ )
592
+ model_s1 = self.model_fn(x_s1, s1)
593
+ if solver_type == 'dpm_solver':
594
+ x_t = (
595
+ expand_dims(sigma_t / sigma_s, dims) * x
596
+ - expand_dims(alpha_t * phi_1, dims) * model_s
597
+ - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
598
+ )
599
+ elif solver_type == 'taylor':
600
+ x_t = (
601
+ expand_dims(sigma_t / sigma_s, dims) * x
602
+ - expand_dims(alpha_t * phi_1, dims) * model_s
603
+ + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s)
604
+ )
605
+ else:
606
+ phi_11 = torch.expm1(r1 * h)
607
+ phi_1 = torch.expm1(h)
608
+
609
+ if model_s is None:
610
+ model_s = self.model_fn(x, s)
611
+ x_s1 = (
612
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
613
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
614
+ )
615
+ model_s1 = self.model_fn(x_s1, s1)
616
+ if solver_type == 'dpm_solver':
617
+ x_t = (
618
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
619
+ - expand_dims(sigma_t * phi_1, dims) * model_s
620
+ - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
621
+ )
622
+ elif solver_type == 'taylor':
623
+ x_t = (
624
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
625
+ - expand_dims(sigma_t * phi_1, dims) * model_s
626
+ - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
627
+ )
628
+ if return_intermediate:
629
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
630
+ else:
631
+ return x_t
632
+
633
    def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-3 from time `s` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
                If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 1. / 3.
        if r2 is None:
            r2 = 2. / 3.
        ns = self.noise_schedule
        dims = x.dim()
        # Work in logSNR (lambda) space: the two intermediate times s1, s2 split the
        # logSNR interval [lambda_s, lambda_t] at fractions r1 and r2.
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        lambda_s2 = lambda_s + r2 * h
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)
        alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)

        if self.predict_x0:
            # Data-prediction parameterization: phi_k are the exponential-integrator
            # coefficients of exp(-h) expanded at the respective intermediate points.
            phi_11 = torch.expm1(-r1 * h)
            phi_12 = torch.expm1(-r2 * h)
            phi_1 = torch.expm1(-h)
            phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
            phi_2 = phi_1 / h + 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                # First intermediate state at time s1 (a DPM-Solver-1 step).
                x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            # Second intermediate state at time s2, corrected by the first difference.
            x_s2 = (
                expand_dims(sigma_s2 / sigma_s, dims) * x
                - expand_dims(alpha_s2 * phi_12, dims) * model_s
                + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                # Taylor variant: build first/second divided differences D1, D2 from
                # the three model evaluations and apply the full 3rd-order expansion.
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + expand_dims(alpha_t * phi_2, dims) * D1
                    - expand_dims(alpha_t * phi_3, dims) * D2
                )
        else:
            # Noise-prediction parameterization: mirror of the branch above with
            # exp(+h) integrator coefficients and alpha/sigma roles exchanged.
            phi_11 = torch.expm1(r1 * h)
            phi_12 = torch.expm1(r2 * h)
            phi_1 = torch.expm1(h)
            phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
            phi_2 = phi_1 / h - 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
                - expand_dims(sigma_s2 * phi_12, dims) * model_s
                - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - expand_dims(sigma_t * phi_2, dims) * D1
                    - expand_dims(sigma_t * phi_3, dims) * D2
                )

        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
        else:
            return x_t
754
+
755
    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
        """
        Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        dims = x.dim()
        # The two cached evaluations: model_prev_0 is the most recent one.
        model_prev_1, model_prev_0 = model_prev_list
        t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        # Step sizes in logSNR space; r0 is the relative size of the previous step,
        # and D1_0 approximates the first derivative of the model output.
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0 = h_0 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        if self.predict_x0:
            # Data-prediction parameterization.
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
                )
        else:
            # Noise-prediction parameterization.
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
                )
        return x_t
811
+
812
    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.

        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        # The three cached evaluations: model_prev_0 is the most recent one.
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        # Relative step sizes (in logSNR space) of the two previous steps, and the
        # backward divided differences D1 (first derivative) and D2 (second derivative).
        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        if self.predict_x0:
            # Data-prediction parameterization: third-order exponential-integrator update.
            x_t = (
                expand_dims(sigma_t / sigma_prev_0, dims) * x
                - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2
            )
        else:
            # Noise-prediction parameterization: mirrored coefficients with exp(+h).
            x_t = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2
            )
        return x_t
858
+
859
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None):
860
+ """
861
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
862
+
863
+ Args:
864
+ x: A pytorch tensor. The initial value at time `s`.
865
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
866
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
867
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
868
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
869
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
870
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
871
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
872
+ r2: A `float`. The hyperparameter of the third-order solver.
873
+ Returns:
874
+ x_t: A pytorch tensor. The approximated solution at time `t`.
875
+ """
876
+ if order == 1:
877
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
878
+ elif order == 2:
879
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
880
+ elif order == 3:
881
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
882
+ else:
883
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
884
+
885
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
886
+ """
887
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
888
+
889
+ Args:
890
+ x: A pytorch tensor. The initial value at time `s`.
891
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
892
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
893
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
894
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
895
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
896
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
897
+ Returns:
898
+ x_t: A pytorch tensor. The approximated solution at time `t`.
899
+ """
900
+ if order == 1:
901
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
902
+ elif order == 2:
903
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
904
+ elif order == 3:
905
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
906
+ else:
907
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
908
+
909
    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.

        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.

        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        # `h` is the current step size in logSNR space; it is adapted each iteration.
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0  # number of function (model) evaluations, for diagnostics only
        # Pair a lower-order and a higher-order update; their difference estimates
        # the local truncation error used to accept/reject the step.
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            # The lower-order step also returns its intermediate model values so the
            # higher-order step can reuse them (no duplicated model evaluations).
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            # Mixed absolute/relative tolerance, elementwise (as in [1]).
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            if torch.all(E <= 1.):
                # Accept the step; otherwise retry from the same `s` with a smaller `h`.
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            # Standard controller: shrink/grow by E^(-1/order) with safety factor theta,
            # clipped so we never step past lambda_0.
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        print('adaptive solver nfe', nfe)  # diagnostic output; kept from the original
        return x
964
+
965
    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
        method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
        atol=0.0078, rtol=0.05,
    ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.

        Supported `method` values (for both noise prediction and data prediction models):
            - 'singlestep': singlestep DPM-Solver ("DPM-Solver-fast" in the paper). Combines
              singlestep solvers of order <= `order` so that the total NFE == `steps`
              (e.g. for order == 3 and steps % 3 == 0: (K - 2) DPM-Solver-3 steps plus one
              DPM-Solver-2 and one DPM-Solver-1 step, with K = steps // 3 + 1).
            - 'multistep': multistep DPM-Solver of order `order`; the first `order` values are
              initialized by lower-order multistep solvers. Total NFE == `steps`.
            - 'singlestep_fixed': fixed-order singlestep DPM-Solver, with
              (`steps` // `order`) * `order` total NFE.
            - 'adaptive': adaptive step size solver ("DPM-Solver-12"/"DPM-Solver-23" in the
              paper). Ignores `steps`; accuracy/cost is controlled by `atol` and `rtol`.

        Advice: for unconditional sampling or guided sampling with a small guidance scale,
        use `order=3, method='singlestep'`; for large guidance scales, use a data prediction
        model (`predict_x0=True`) with `order=2, method='multistep'`.

        Supported `skip_type` values: 'logSNR' (recommended for low-resolution images),
        'time_uniform' (recommended for high-resolution images), and 'time_quadratic'.

        Args:
            x: A pytorch tensor. The initial value at time `t_start`
                (e.g. a standard normal sample if `t_start` == T).
            steps: A `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time; defaults to self.noise_schedule.T.
            t_end: A `float`. The ending time; defaults to 1. / self.noise_schedule.total_N.
                For continuous-time DPMs we recommend 1e-3 for steps <= 15, else 1e-4.
            order: A `int`. The order of DPM-Solver (1, 2 or 3).
            skip_type: A `str`. The spacing of the time steps (see above).
            method: A `str`. The sampling method (see above).
            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`; empirically a key to
                stabilizing sampling with very few steps (especially steps <= 10).
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step
                (adds one extra NFE; mainly helps FID for low-resolution images).
            solver_type: A `str`. 'dpm_solver' or 'taylor'; we recommend 'dpm_solver'.
            atol: A `float`. Absolute tolerance of the adaptive step size solver.
            rtol: A `float`. Relative tolerance of the adaptive step size solver.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.
        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower order multistep DPM-Solver.
                for init_order in range(1, order):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
                for step in range(order, steps + 1):
                    vec_t = timesteps[step].expand(x.shape[0])
                    if lower_order_final and steps < 15:
                        # Drop to lower orders near the end for stability with few steps.
                        step_order = min(order, steps + 1 - step)
                    else:
                        step_order = order
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type)
                    # Slide the history window of cached model values / times.
                    for i in range(order - 1):
                        t_prev_list[i] = t_prev_list[i + 1]
                        model_prev_list[i] = model_prev_list[i + 1]
                    t_prev_list[-1] = vec_t
                    # We do not need to evaluate the final model value.
                    if step < steps:
                        model_prev_list[-1] = self.model_fn(x, vec_t)
        elif method in ['singlestep', 'singlestep_fixed']:
            if method == 'singlestep':
                timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
            elif method == 'singlestep_fixed':
                K = steps // order
                orders = [order,] * K
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for i, order in enumerate(orders):
                t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
                # Re-split each outer interval to derive the intermediate ratios r1/r2
                # of the singlestep solver from the inner logSNR spacing.
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
                h = lambda_inner[-1] - lambda_inner[0]
                r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
                r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
                x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
        if denoise_to_zero:
            # Optional final denoising step to time ~0 (one extra model evaluation).
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x
1125
+
1126
+
1127
+
1128
+ #############################################################
1129
+ # other utility functions
1130
+ #############################################################
1131
+
1132
def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    # Sort the query value together with the keypoints; the rank of the query in
    # the sorted order identifies which keypoint interval it falls into. This
    # avoids boolean indexing and keeps the operation autograd-friendly.
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    # Position of the original query (index 0 before sorting) inside the sorted array.
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    # Clamp to the outermost segment when the query lies below the first or above
    # the last keypoint, so out-of-range queries extrapolate linearly.
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    # Segment endpoints on the x-axis, gathered from the sorted (query + keypoints) array.
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    # Corresponding segment index in the ORIGINAL keypoint array (not the sorted one),
    # used to pick the y endpoints from yp.
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    # Standard linear interpolation on the selected segment.
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand
1172
+
1173
+
1174
def expand_dims(v, dims):
    """
    Append trailing singleton axes to `v` until it has `dims` dimensions.

    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`, the desired total number of dimensions.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`,
        suitable for broadcasting per-sample coefficients against batched data.
    """
    # Equivalent to `v[(...,) + (None,) * (dims - 1)]`: the reshape keeps the data
    # unchanged and simply adds (dims - 1) trailing axes of length one.
    return v.reshape(v.shape + (1,) * (dims - 1))
3DTopia/ldm/models/diffusion/dpm_solver/sampler.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+
5
+ from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
6
+
7
+
8
class DPMSolverSampler(object):
    """
    Sampling wrapper that drives DPM-Solver for a latent diffusion model.

    Builds a discrete noise schedule from the model's `alphas_cumprod` and exposes a
    `sample()` method compatible with the DDIM/PLMS sampler interfaces (most of the
    extra keyword arguments are accepted for interface parity and ignored).
    """

    def __init__(self, model, **kwargs):
        """
        Args:
            model: the diffusion model; must expose `alphas_cumprod`, `betas`,
                `device` and `apply_model`.
            **kwargs: ignored; accepted for interface compatibility.
        """
        super().__init__()
        self.model = model
        # Keep a detached float32 copy of the cumulative alphas; the noise
        # schedule in sample() is built from this buffer.
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        """Store `attr` as a plain attribute, moving tensors to CUDA when a GPU is available."""
        if type(attr) == torch.Tensor:
            # Bug fix: the original unconditionally called .to("cuda"), which raises
            # on CPU-only machines. Only migrate the tensor when CUDA is available.
            if torch.cuda.is_available() and attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        """
        Sample `batch_size` latents of shape `shape` (C, H, W) with `S` solver steps.

        Uses classifier-free guidance with scale `unconditional_guidance_scale` and a
        second-order multistep DPM-Solver in data-prediction mode.
        Returns a tuple (samples, None); the second slot mirrors the DDIM sampler's
        intermediates return and is unused here.
        """
        if conditioning is not None:
            # Sanity-check that the conditioning batch matches the requested batch size.
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')

        device = self.model.betas.device
        if x_T is None:
            img = torch.randn(size, device=device)
        else:
            img = x_T

        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        # Wrap the raw eps-model into a guided noise-prediction function.
        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type="noise",
            guidance_type="classifier-free",
            condition=conditioning,
            unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
        x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)

        return x.to(device), None
3DTopia/ldm/models/diffusion/plms.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ from functools import partial
7
+
8
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
9
+
10
+
11
+ class PLMSSampler(object):
12
+ def __init__(self, model, schedule="linear", **kwargs):
13
+ super().__init__()
14
+ self.model = model
15
+ self.ddpm_num_timesteps = model.num_timesteps
16
+ self.schedule = schedule
17
+
18
+ def register_buffer(self, name, attr):
19
+ if type(attr) == torch.Tensor:
20
+ if attr.device != torch.device("cuda"):
21
+ attr = attr.to(torch.device("cuda"))
22
+ setattr(self, name, attr)
23
+
24
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
25
+ if ddim_eta != 0:
26
+ raise ValueError('ddim_eta must be 0 for PLMS')
27
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
28
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
29
+ alphas_cumprod = self.model.alphas_cumprod
30
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
31
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
32
+
33
+ self.register_buffer('betas', to_torch(self.model.betas))
34
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
35
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
36
+
37
+ # calculations for diffusion q(x_t | x_{t-1}) and others
38
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
39
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
40
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
41
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
42
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
43
+
44
+ # ddim sampling parameters
45
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
46
+ ddim_timesteps=self.ddim_timesteps,
47
+ eta=ddim_eta,verbose=verbose)
48
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
49
+ self.register_buffer('ddim_alphas', ddim_alphas)
50
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
51
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
52
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
53
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
54
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
55
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
56
+
57
+ @torch.no_grad()
58
+ def sample(self,
59
+ S,
60
+ batch_size,
61
+ shape,
62
+ conditioning=None,
63
+ callback=None,
64
+ normals_sequence=None,
65
+ img_callback=None,
66
+ quantize_x0=False,
67
+ eta=0.,
68
+ mask=None,
69
+ x0=None,
70
+ temperature=1.,
71
+ noise_dropout=0.,
72
+ score_corrector=None,
73
+ corrector_kwargs=None,
74
+ verbose=True,
75
+ x_T=None,
76
+ log_every_t=100,
77
+ unconditional_guidance_scale=1.,
78
+ unconditional_conditioning=None,
79
+ # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
80
+ **kwargs
81
+ ):
82
+ if conditioning is not None:
83
+ if isinstance(conditioning, dict):
84
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
85
+ if cbs != batch_size:
86
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
87
+ else:
88
+ if conditioning.shape[0] != batch_size:
89
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
90
+
91
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
92
+ # sampling
93
+ C, H, W = shape
94
+ size = (batch_size, C, H, W)
95
+ print(f'Data shape for PLMS sampling is {size}')
96
+
97
+ samples, intermediates = self.plms_sampling(conditioning, size,
98
+ callback=callback,
99
+ img_callback=img_callback,
100
+ quantize_denoised=quantize_x0,
101
+ mask=mask, x0=x0,
102
+ ddim_use_original_steps=False,
103
+ noise_dropout=noise_dropout,
104
+ temperature=temperature,
105
+ score_corrector=score_corrector,
106
+ corrector_kwargs=corrector_kwargs,
107
+ x_T=x_T,
108
+ log_every_t=log_every_t,
109
+ unconditional_guidance_scale=unconditional_guidance_scale,
110
+ unconditional_conditioning=unconditional_conditioning,
111
+ )
112
+ return samples, intermediates
113
+
114
+ @torch.no_grad()
115
+ def plms_sampling(self, cond, shape,
116
+ x_T=None, ddim_use_original_steps=False,
117
+ callback=None, timesteps=None, quantize_denoised=False,
118
+ mask=None, x0=None, img_callback=None, log_every_t=100,
119
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
120
+ unconditional_guidance_scale=1., unconditional_conditioning=None,):
121
+ device = self.model.betas.device
122
+ b = shape[0]
123
+ if x_T is None:
124
+ img = torch.randn(shape, device=device)
125
+ else:
126
+ img = x_T
127
+
128
+ if timesteps is None:
129
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
130
+ elif timesteps is not None and not ddim_use_original_steps:
131
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
132
+ timesteps = self.ddim_timesteps[:subset_end]
133
+
134
+ intermediates = {'x_inter': [img], 'pred_x0': [img]}
135
+ time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
136
+ total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
137
+ print(f"Running PLMS Sampling with {total_steps} timesteps")
138
+
139
+ iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
140
+ old_eps = []
141
+
142
+ for i, step in enumerate(iterator):
143
+ index = total_steps - i - 1
144
+ ts = torch.full((b,), step, device=device, dtype=torch.long)
145
+ ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
146
+
147
+ if mask is not None:
148
+ assert x0 is not None
149
+ img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
150
+ img = img_orig * mask + (1. - mask) * img
151
+
152
+ outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
153
+ quantize_denoised=quantize_denoised, temperature=temperature,
154
+ noise_dropout=noise_dropout, score_corrector=score_corrector,
155
+ corrector_kwargs=corrector_kwargs,
156
+ unconditional_guidance_scale=unconditional_guidance_scale,
157
+ unconditional_conditioning=unconditional_conditioning,
158
+ old_eps=old_eps, t_next=ts_next)
159
+ img, pred_x0, e_t = outs
160
+ old_eps.append(e_t)
161
+ if len(old_eps) >= 4:
162
+ old_eps.pop(0)
163
+ if callback: callback(i)
164
+ if img_callback: img_callback(pred_x0, i)
165
+
166
+ if index % log_every_t == 0 or index == total_steps - 1:
167
+ intermediates['x_inter'].append(img)
168
+ intermediates['pred_x0'].append(pred_x0)
169
+
170
+ return img, intermediates
171
+
172
+ @torch.no_grad()
173
+ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
174
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
175
+ unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
176
+ b, *_, device = *x.shape, x.device
177
+
178
+ def get_model_output(x, t):
179
+ if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
180
+ e_t = self.model.apply_model(x, t, c)
181
+ else:
182
+ x_in = torch.cat([x] * 2)
183
+ t_in = torch.cat([t] * 2)
184
+ c_in = torch.cat([unconditional_conditioning, c])
185
+ e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
186
+ e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
187
+
188
+ if score_corrector is not None:
189
+ assert self.model.parameterization == "eps"
190
+ e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
191
+
192
+ return e_t
193
+
194
+ alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
195
+ alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
196
+ sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
197
+ sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
198
+
199
+ def get_x_prev_and_pred_x0(e_t, index):
200
+ # select parameters corresponding to the currently considered timestep
201
+ a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
202
+ a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
203
+ sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
204
+ sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
205
+
206
+ # current prediction for x_0
207
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
208
+ if quantize_denoised:
209
+ pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
210
+ # direction pointing to x_t
211
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
212
+ noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
213
+ if noise_dropout > 0.:
214
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
215
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
216
+ return x_prev, pred_x0
217
+
218
+ e_t = get_model_output(x, t)
219
+ if len(old_eps) == 0:
220
+ # Pseudo Improved Euler (2nd order)
221
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
222
+ e_t_next = get_model_output(x_prev, t_next)
223
+ e_t_prime = (e_t + e_t_next) / 2
224
+ elif len(old_eps) == 1:
225
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
226
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
227
+ elif len(old_eps) == 2:
228
+ # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
229
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
230
+ elif len(old_eps) >= 3:
231
+ # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
232
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
233
+
234
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
235
+
236
+ return x_prev, pred_x0, e_t
3DTopia/ldm/modules/attention.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from inspect import isfunction
2
+ import math
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import nn, einsum
6
+ from einops import rearrange, repeat
7
+
8
+ from ldm.modules.diffusionmodules.util import checkpoint
9
+
10
+
11
+ def exists(val):
12
+ return val is not None
13
+
14
+
15
+ def uniq(arr):
16
+ return{el: True for el in arr}.keys()
17
+
18
+
19
+ def default(val, d):
20
+ if exists(val):
21
+ return val
22
+ return d() if isfunction(d) else d
23
+
24
+
25
+ def max_neg_value(t):
26
+ return -torch.finfo(t.dtype).max
27
+
28
+
29
+ def init_(tensor):
30
+ dim = tensor.shape[-1]
31
+ std = 1 / math.sqrt(dim)
32
+ tensor.uniform_(-std, std)
33
+ return tensor
34
+
35
+
36
+ # feedforward
37
+ class GEGLU(nn.Module):
38
+ def __init__(self, dim_in, dim_out):
39
+ super().__init__()
40
+ self.proj = nn.Linear(dim_in, dim_out * 2)
41
+
42
+ def forward(self, x):
43
+ x, gate = self.proj(x).chunk(2, dim=-1)
44
+ return x * F.gelu(gate)
45
+
46
+
47
+ class FeedForward(nn.Module):
48
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
49
+ super().__init__()
50
+ inner_dim = int(dim * mult)
51
+ dim_out = default(dim_out, dim)
52
+ project_in = nn.Sequential(
53
+ nn.Linear(dim, inner_dim),
54
+ nn.GELU()
55
+ ) if not glu else GEGLU(dim, inner_dim)
56
+
57
+ self.net = nn.Sequential(
58
+ project_in,
59
+ nn.Dropout(dropout),
60
+ nn.Linear(inner_dim, dim_out)
61
+ )
62
+
63
+ def forward(self, x):
64
+ return self.net(x)
65
+
66
+
67
+ def zero_module(module):
68
+ """
69
+ Zero out the parameters of a module and return it.
70
+ """
71
+ for p in module.parameters():
72
+ p.detach().zero_()
73
+ return module
74
+
75
+
76
+ def Normalize(in_channels):
77
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
78
+
79
+
80
+ class LinearAttention(nn.Module):
81
+ def __init__(self, dim, heads=4, dim_head=32):
82
+ super().__init__()
83
+ self.heads = heads
84
+ hidden_dim = dim_head * heads
85
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
86
+ self.to_out = nn.Conv2d(hidden_dim, dim, 1)
87
+
88
+ def forward(self, x):
89
+ b, c, h, w = x.shape
90
+ qkv = self.to_qkv(x)
91
+ q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
92
+ k = k.softmax(dim=-1)
93
+ context = torch.einsum('bhdn,bhen->bhde', k, v)
94
+ out = torch.einsum('bhde,bhdn->bhen', context, q)
95
+ out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
96
+ return self.to_out(out)
97
+
98
+
99
+ class SpatialSelfAttention(nn.Module):
100
+ def __init__(self, in_channels):
101
+ super().__init__()
102
+ self.in_channels = in_channels
103
+
104
+ self.norm = Normalize(in_channels)
105
+ self.q = torch.nn.Conv2d(in_channels,
106
+ in_channels,
107
+ kernel_size=1,
108
+ stride=1,
109
+ padding=0)
110
+ self.k = torch.nn.Conv2d(in_channels,
111
+ in_channels,
112
+ kernel_size=1,
113
+ stride=1,
114
+ padding=0)
115
+ self.v = torch.nn.Conv2d(in_channels,
116
+ in_channels,
117
+ kernel_size=1,
118
+ stride=1,
119
+ padding=0)
120
+ self.proj_out = torch.nn.Conv2d(in_channels,
121
+ in_channels,
122
+ kernel_size=1,
123
+ stride=1,
124
+ padding=0)
125
+
126
+ def forward(self, x):
127
+ h_ = x
128
+ h_ = self.norm(h_)
129
+ q = self.q(h_)
130
+ k = self.k(h_)
131
+ v = self.v(h_)
132
+
133
+ # compute attention
134
+ b,c,h,w = q.shape
135
+ q = rearrange(q, 'b c h w -> b (h w) c')
136
+ k = rearrange(k, 'b c h w -> b c (h w)')
137
+ w_ = torch.einsum('bij,bjk->bik', q, k)
138
+
139
+ w_ = w_ * (int(c)**(-0.5))
140
+ w_ = torch.nn.functional.softmax(w_, dim=2)
141
+
142
+ # attend to values
143
+ v = rearrange(v, 'b c h w -> b c (h w)')
144
+ w_ = rearrange(w_, 'b i j -> b j i')
145
+ h_ = torch.einsum('bij,bjk->bik', v, w_)
146
+ h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
147
+ h_ = self.proj_out(h_)
148
+
149
+ return x+h_
150
+
151
+
152
+ class CrossAttention(nn.Module):
153
+ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
154
+ super().__init__()
155
+ inner_dim = dim_head * heads
156
+ context_dim = default(context_dim, query_dim)
157
+
158
+ self.scale = dim_head ** -0.5
159
+ self.heads = heads
160
+
161
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
162
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
163
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
164
+
165
+ self.to_out = nn.Sequential(
166
+ nn.Linear(inner_dim, query_dim),
167
+ nn.Dropout(dropout)
168
+ )
169
+
170
+ def forward(self, x, context=None, mask=None):
171
+ h = self.heads
172
+
173
+ q = self.to_q(x)
174
+ context = default(context, x)
175
+ k = self.to_k(context)
176
+ v = self.to_v(context)
177
+
178
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
179
+
180
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
181
+
182
+ if exists(mask):
183
+ mask = rearrange(mask, 'b ... -> b (...)')
184
+ max_neg_value = -torch.finfo(sim.dtype).max
185
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
186
+ sim.masked_fill_(~mask, max_neg_value)
187
+
188
+ # attention, what we cannot get enough of
189
+ attn = sim.softmax(dim=-1)
190
+
191
+ out = einsum('b i j, b j d -> b i d', attn, v)
192
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
193
+ return self.to_out(out)
194
+
195
+
196
+ class BasicTransformerBlock(nn.Module):
197
+ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
198
+ super().__init__()
199
+ self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention
200
+ self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
201
+ self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
202
+ heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none
203
+ self.norm1 = nn.LayerNorm(dim)
204
+ self.norm2 = nn.LayerNorm(dim)
205
+ self.norm3 = nn.LayerNorm(dim)
206
+ self.checkpoint = checkpoint
207
+
208
+ def forward(self, x, context=None):
209
+ return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
210
+
211
+ def _forward(self, x, context=None):
212
+ x = self.attn1(self.norm1(x)) + x
213
+ x = self.attn2(self.norm2(x), context=context) + x
214
+ x = self.ff(self.norm3(x)) + x
215
+ return x
216
+
217
+
218
+ class SpatialTransformer(nn.Module):
219
+ """
220
+ Transformer block for image-like data.
221
+ First, project the input (aka embedding)
222
+ and reshape to b, t, d.
223
+ Then apply standard transformer action.
224
+ Finally, reshape to image
225
+ """
226
+ def __init__(self, in_channels, n_heads, d_head,
227
+ depth=1, dropout=0., context_dim=None):
228
+ super().__init__()
229
+ self.in_channels = in_channels
230
+ inner_dim = n_heads * d_head
231
+ self.norm = Normalize(in_channels)
232
+
233
+ self.proj_in = nn.Conv2d(in_channels,
234
+ inner_dim,
235
+ kernel_size=1,
236
+ stride=1,
237
+ padding=0)
238
+
239
+ self.transformer_blocks = nn.ModuleList(
240
+ [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
241
+ for d in range(depth)]
242
+ )
243
+
244
+ self.proj_out = zero_module(nn.Conv2d(inner_dim,
245
+ in_channels,
246
+ kernel_size=1,
247
+ stride=1,
248
+ padding=0))
249
+
250
+ def forward(self, x, context=None):
251
+ # note: if no context is given, cross-attention defaults to self-attention
252
+ b, c, h, w = x.shape
253
+ x_in = x
254
+ x = self.norm(x)
255
+ x = self.proj_in(x)
256
+ x = rearrange(x, 'b c h w -> b (h w) c')
257
+ for block in self.transformer_blocks:
258
+ x = block(x, context=context)
259
+ x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
260
+ x = self.proj_out(x)
261
+ return x + x_in
3DTopia/ldm/modules/diffusionmodules/__init__.py ADDED
File without changes
3DTopia/ldm/modules/diffusionmodules/model.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pytorch_diffusion + derived encoder decoder
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+ from einops import rearrange
7
+
8
+ from ldm.util import instantiate_from_config
9
+ from ldm.modules.attention import LinearAttention
10
+
11
+
12
+ def get_timestep_embedding(timesteps, embedding_dim):
13
+ """
14
+ This matches the implementation in Denoising Diffusion Probabilistic Models:
15
+ From Fairseq.
16
+ Build sinusoidal embeddings.
17
+ This matches the implementation in tensor2tensor, but differs slightly
18
+ from the description in Section 3.5 of "Attention Is All You Need".
19
+ """
20
+ assert len(timesteps.shape) == 1
21
+
22
+ half_dim = embedding_dim // 2
23
+ emb = math.log(10000) / (half_dim - 1)
24
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
25
+ emb = emb.to(device=timesteps.device)
26
+ emb = timesteps.float()[:, None] * emb[None, :]
27
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
28
+ if embedding_dim % 2 == 1: # zero pad
29
+ emb = torch.nn.functional.pad(emb, (0,1,0,0))
30
+ return emb
31
+
32
+
33
+ def nonlinearity(x):
34
+ # swish
35
+ return x*torch.sigmoid(x)
36
+
37
+
38
+ def Normalize(in_channels, num_groups=32):
39
+ return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
40
+
41
+
42
+ class Upsample(nn.Module):
43
+ def __init__(self, in_channels, with_conv):
44
+ super().__init__()
45
+ self.with_conv = with_conv
46
+ if self.with_conv:
47
+ self.conv = torch.nn.Conv2d(in_channels,
48
+ in_channels,
49
+ kernel_size=3,
50
+ stride=1,
51
+ padding=1)
52
+
53
+ def forward(self, x):
54
+ x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
55
+ if self.with_conv:
56
+ x = self.conv(x)
57
+ return x
58
+
59
+
60
+ class Downsample(nn.Module):
61
+ def __init__(self, in_channels, with_conv):
62
+ super().__init__()
63
+ self.with_conv = with_conv
64
+ if self.with_conv:
65
+ # no asymmetric padding in torch conv, must do it ourselves
66
+ self.conv = torch.nn.Conv2d(in_channels,
67
+ in_channels,
68
+ kernel_size=3,
69
+ stride=2,
70
+ padding=0)
71
+
72
+ def forward(self, x):
73
+ if self.with_conv:
74
+ pad = (0,1,0,1)
75
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
76
+ x = self.conv(x)
77
+ else:
78
+ x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
79
+ return x
80
+
81
+
82
+ class ResnetBlock(nn.Module):
83
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
84
+ dropout, temb_channels=512):
85
+ super().__init__()
86
+ self.in_channels = in_channels
87
+ out_channels = in_channels if out_channels is None else out_channels
88
+ self.out_channels = out_channels
89
+ self.use_conv_shortcut = conv_shortcut
90
+
91
+ self.norm1 = Normalize(in_channels)
92
+ self.conv1 = torch.nn.Conv2d(in_channels,
93
+ out_channels,
94
+ kernel_size=3,
95
+ stride=1,
96
+ padding=1)
97
+ if temb_channels > 0:
98
+ self.temb_proj = torch.nn.Linear(temb_channels,
99
+ out_channels)
100
+ self.norm2 = Normalize(out_channels)
101
+ self.dropout = torch.nn.Dropout(dropout)
102
+ self.conv2 = torch.nn.Conv2d(out_channels,
103
+ out_channels,
104
+ kernel_size=3,
105
+ stride=1,
106
+ padding=1)
107
+ if self.in_channels != self.out_channels:
108
+ if self.use_conv_shortcut:
109
+ self.conv_shortcut = torch.nn.Conv2d(in_channels,
110
+ out_channels,
111
+ kernel_size=3,
112
+ stride=1,
113
+ padding=1)
114
+ else:
115
+ self.nin_shortcut = torch.nn.Conv2d(in_channels,
116
+ out_channels,
117
+ kernel_size=1,
118
+ stride=1,
119
+ padding=0)
120
+
121
+ def forward(self, x, temb):
122
+ h = x
123
+ h = self.norm1(h)
124
+ h = nonlinearity(h)
125
+ h = self.conv1(h)
126
+
127
+ if temb is not None:
128
+ h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
129
+
130
+ h = self.norm2(h)
131
+ h = nonlinearity(h)
132
+ h = self.dropout(h)
133
+ h = self.conv2(h)
134
+
135
+ if self.in_channels != self.out_channels:
136
+ if self.use_conv_shortcut:
137
+ x = self.conv_shortcut(x)
138
+ else:
139
+ x = self.nin_shortcut(x)
140
+
141
+ return x+h
142
+
143
+
144
+ class LinAttnBlock(LinearAttention):
145
+ """to match AttnBlock usage"""
146
+ def __init__(self, in_channels):
147
+ super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
148
+
149
+
150
+ class AttnBlock(nn.Module):
151
+ def __init__(self, in_channels):
152
+ super().__init__()
153
+ self.in_channels = in_channels
154
+
155
+ self.norm = Normalize(in_channels)
156
+ self.q = torch.nn.Conv2d(in_channels,
157
+ in_channels,
158
+ kernel_size=1,
159
+ stride=1,
160
+ padding=0)
161
+ self.k = torch.nn.Conv2d(in_channels,
162
+ in_channels,
163
+ kernel_size=1,
164
+ stride=1,
165
+ padding=0)
166
+ self.v = torch.nn.Conv2d(in_channels,
167
+ in_channels,
168
+ kernel_size=1,
169
+ stride=1,
170
+ padding=0)
171
+ self.proj_out = torch.nn.Conv2d(in_channels,
172
+ in_channels,
173
+ kernel_size=1,
174
+ stride=1,
175
+ padding=0)
176
+
177
+
178
+ def forward(self, x):
179
+ h_ = x
180
+ h_ = self.norm(h_)
181
+ q = self.q(h_)
182
+ k = self.k(h_)
183
+ v = self.v(h_)
184
+
185
+ # compute attention
186
+ b,c,h,w = q.shape
187
+ q = q.reshape(b,c,h*w)
188
+ q = q.permute(0,2,1) # b,hw,c
189
+ k = k.reshape(b,c,h*w) # b,c,hw
190
+ w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
191
+ w_ = w_ * (int(c)**(-0.5))
192
+ w_ = torch.nn.functional.softmax(w_, dim=2)
193
+
194
+ # attend to values
195
+ v = v.reshape(b,c,h*w)
196
+ w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
197
+ h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
198
+ h_ = h_.reshape(b,c,h,w)
199
+
200
+ h_ = self.proj_out(h_)
201
+
202
+ return x+h_
203
+
204
+
205
+ def make_attn(in_channels, attn_type="vanilla"):
206
+ assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
207
+ print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
208
+ if attn_type == "vanilla":
209
+ return AttnBlock(in_channels)
210
+ elif attn_type == "none":
211
+ return nn.Identity(in_channels)
212
+ else:
213
+ return LinAttnBlock(in_channels)
214
+
215
+
216
+ class Model(nn.Module):
217
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
218
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
219
+ resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
220
+ super().__init__()
221
+ if use_linear_attn: attn_type = "linear"
222
+ self.ch = ch
223
+ self.temb_ch = self.ch*4
224
+ self.num_resolutions = len(ch_mult)
225
+ self.num_res_blocks = num_res_blocks
226
+ self.resolution = resolution
227
+ self.in_channels = in_channels
228
+
229
+ self.use_timestep = use_timestep
230
+ if self.use_timestep:
231
+ # timestep embedding
232
+ self.temb = nn.Module()
233
+ self.temb.dense = nn.ModuleList([
234
+ torch.nn.Linear(self.ch,
235
+ self.temb_ch),
236
+ torch.nn.Linear(self.temb_ch,
237
+ self.temb_ch),
238
+ ])
239
+
240
+ # downsampling
241
+ self.conv_in = torch.nn.Conv2d(in_channels,
242
+ self.ch,
243
+ kernel_size=3,
244
+ stride=1,
245
+ padding=1)
246
+
247
+ curr_res = resolution
248
+ in_ch_mult = (1,)+tuple(ch_mult)
249
+ self.down = nn.ModuleList()
250
+ for i_level in range(self.num_resolutions):
251
+ block = nn.ModuleList()
252
+ attn = nn.ModuleList()
253
+ block_in = ch*in_ch_mult[i_level]
254
+ block_out = ch*ch_mult[i_level]
255
+ for i_block in range(self.num_res_blocks):
256
+ block.append(ResnetBlock(in_channels=block_in,
257
+ out_channels=block_out,
258
+ temb_channels=self.temb_ch,
259
+ dropout=dropout))
260
+ block_in = block_out
261
+ if curr_res in attn_resolutions:
262
+ attn.append(make_attn(block_in, attn_type=attn_type))
263
+ down = nn.Module()
264
+ down.block = block
265
+ down.attn = attn
266
+ if i_level != self.num_resolutions-1:
267
+ down.downsample = Downsample(block_in, resamp_with_conv)
268
+ curr_res = curr_res // 2
269
+ self.down.append(down)
270
+
271
+ # middle
272
+ self.mid = nn.Module()
273
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
274
+ out_channels=block_in,
275
+ temb_channels=self.temb_ch,
276
+ dropout=dropout)
277
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
278
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
279
+ out_channels=block_in,
280
+ temb_channels=self.temb_ch,
281
+ dropout=dropout)
282
+
283
+ # upsampling
284
+ self.up = nn.ModuleList()
285
+ for i_level in reversed(range(self.num_resolutions)):
286
+ block = nn.ModuleList()
287
+ attn = nn.ModuleList()
288
+ block_out = ch*ch_mult[i_level]
289
+ skip_in = ch*ch_mult[i_level]
290
+ for i_block in range(self.num_res_blocks+1):
291
+ if i_block == self.num_res_blocks:
292
+ skip_in = ch*in_ch_mult[i_level]
293
+ block.append(ResnetBlock(in_channels=block_in+skip_in,
294
+ out_channels=block_out,
295
+ temb_channels=self.temb_ch,
296
+ dropout=dropout))
297
+ block_in = block_out
298
+ if curr_res in attn_resolutions:
299
+ attn.append(make_attn(block_in, attn_type=attn_type))
300
+ up = nn.Module()
301
+ up.block = block
302
+ up.attn = attn
303
+ if i_level != 0:
304
+ up.upsample = Upsample(block_in, resamp_with_conv)
305
+ curr_res = curr_res * 2
306
+ self.up.insert(0, up) # prepend to get consistent order
307
+
308
+ # end
309
+ self.norm_out = Normalize(block_in)
310
+ self.conv_out = torch.nn.Conv2d(block_in,
311
+ out_ch,
312
+ kernel_size=3,
313
+ stride=1,
314
+ padding=1)
315
+
316
+ def forward(self, x, t=None, context=None):
317
+ #assert x.shape[2] == x.shape[3] == self.resolution
318
+ if context is not None:
319
+ # assume aligned context, cat along channel axis
320
+ x = torch.cat((x, context), dim=1)
321
+ if self.use_timestep:
322
+ # timestep embedding
323
+ assert t is not None
324
+ temb = get_timestep_embedding(t, self.ch)
325
+ temb = self.temb.dense[0](temb)
326
+ temb = nonlinearity(temb)
327
+ temb = self.temb.dense[1](temb)
328
+ else:
329
+ temb = None
330
+
331
+ # downsampling
332
+ hs = [self.conv_in(x)]
333
+ for i_level in range(self.num_resolutions):
334
+ for i_block in range(self.num_res_blocks):
335
+ h = self.down[i_level].block[i_block](hs[-1], temb)
336
+ if len(self.down[i_level].attn) > 0:
337
+ h = self.down[i_level].attn[i_block](h)
338
+ hs.append(h)
339
+ if i_level != self.num_resolutions-1:
340
+ hs.append(self.down[i_level].downsample(hs[-1]))
341
+
342
+ # middle
343
+ h = hs[-1]
344
+ h = self.mid.block_1(h, temb)
345
+ h = self.mid.attn_1(h)
346
+ h = self.mid.block_2(h, temb)
347
+
348
+ # upsampling
349
+ for i_level in reversed(range(self.num_resolutions)):
350
+ for i_block in range(self.num_res_blocks+1):
351
+ h = self.up[i_level].block[i_block](
352
+ torch.cat([h, hs.pop()], dim=1), temb)
353
+ if len(self.up[i_level].attn) > 0:
354
+ h = self.up[i_level].attn[i_block](h)
355
+ if i_level != 0:
356
+ h = self.up[i_level].upsample(h)
357
+
358
+ # end
359
+ h = self.norm_out(h)
360
+ h = nonlinearity(h)
361
+ h = self.conv_out(h)
362
+ return h
363
+
364
+ def get_last_layer(self):
365
+ return self.conv_out.weight
366
+
367
+
368
class Encoder(nn.Module):
    """Convolutional encoder mapping images to a latent feature map.

    Follows the DDPM U-Net downsampling path: a conv stem, `len(ch_mult)`
    resolution stages of ResNet blocks (with optional attention at the
    resolutions listed in `attn_resolutions`), a middle block, and a final
    3x3 projection to `z_channels` (doubled when `double_z` so the output
    can parameterize a diagonal Gaussian).
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
                 **ignore_kwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0  # encoder is not timestep-conditioned
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # stem
        self.conv_in = torch.nn.Conv2d(in_channels, self.ch,
                                       kernel_size=3, stride=1, padding=1)

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for lvl in range(self.num_resolutions):
            resnets = nn.ModuleList()
            attns = nn.ModuleList()
            width = ch * in_ch_mult[lvl]
            width_out = ch * ch_mult[lvl]
            for _ in range(self.num_res_blocks):
                resnets.append(ResnetBlock(in_channels=width,
                                           out_channels=width_out,
                                           temb_channels=self.temb_ch,
                                           dropout=dropout))
                width = width_out
                if curr_res in attn_resolutions:
                    attns.append(make_attn(width, attn_type=attn_type))
            stage = nn.Module()
            stage.block = resnets
            stage.attn = attns
            if lvl != self.num_resolutions - 1:
                stage.downsample = Downsample(width, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(stage)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=width, out_channels=width,
                                       temb_channels=self.temb_ch, dropout=dropout)
        self.mid.attn_1 = make_attn(width, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=width, out_channels=width,
                                       temb_channels=self.temb_ch, dropout=dropout)

        # output head
        self.norm_out = Normalize(width)
        self.conv_out = torch.nn.Conv2d(width,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        temb = None  # no timestep conditioning in the encoder

        # downsampling
        feats = [self.conv_in(x)]
        for lvl in range(self.num_resolutions):
            stage = self.down[lvl]
            for blk in range(self.num_res_blocks):
                h = stage.block[blk](feats[-1], temb)
                if len(stage.attn) > 0:
                    h = stage.attn[blk](h)
                feats.append(h)
            if lvl != self.num_resolutions - 1:
                feats.append(stage.downsample(feats[-1]))

        # middle
        h = feats[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # output head
        h = self.norm_out(h)
        h = nonlinearity(h)
        return self.conv_out(h)
460
+
461
+
462
class Decoder(nn.Module):
    """Convolutional decoder mapping a latent `z` back to image space.

    Mirrors `Encoder` in reverse: conv stem from `z_channels`, middle block,
    `len(ch_mult)` upsampling stages of ResNet blocks (with optional
    attention), and a final 3x3 projection to `out_ch`.  With
    `give_pre_end`, the pre-normalization features are returned instead;
    with `tanh_out`, the output is squashed through tanh.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla", **ignorekwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0  # decoder is not timestep-conditioned
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # channel width and spatial size at the lowest resolution
        width = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))

        # z to width
        self.conv_in = torch.nn.Conv2d(z_channels, width,
                                       kernel_size=3, stride=1, padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=width, out_channels=width,
                                       temb_channels=self.temb_ch, dropout=dropout)
        self.mid.attn_1 = make_attn(width, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=width, out_channels=width,
                                       temb_channels=self.temb_ch, dropout=dropout)

        # upsampling stages: built coarsest-first, stored finest-first so the
        # indexing matches the encoder's ordering
        self.up = nn.ModuleList()
        for lvl in reversed(range(self.num_resolutions)):
            resnets = nn.ModuleList()
            attns = nn.ModuleList()
            width_out = ch * ch_mult[lvl]
            for _ in range(self.num_res_blocks + 1):
                resnets.append(ResnetBlock(in_channels=width,
                                           out_channels=width_out,
                                           temb_channels=self.temb_ch,
                                           dropout=dropout))
                width = width_out
                if curr_res in attn_resolutions:
                    attns.append(make_attn(width, attn_type=attn_type))
            stage = nn.Module()
            stage.block = resnets
            stage.attn = attns
            if lvl != 0:
                stage.upsample = Upsample(width, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, stage)  # prepend to get consistent order

        # output head
        self.norm_out = Normalize(width)
        self.conv_out = torch.nn.Conv2d(width, out_ch,
                                        kernel_size=3, stride=1, padding=1)

    def forward(self, z):
        # self.z_shape is informational only; any spatial size is accepted
        self.last_z_shape = z.shape

        temb = None  # no timestep conditioning

        # z to width
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for lvl in reversed(range(self.num_resolutions)):
            stage = self.up[lvl]
            for blk in range(self.num_res_blocks + 1):
                h = stage.block[blk](h, temb)
                if len(stage.attn) > 0:
                    h = stage.attn[blk](h)
            if lvl != 0:
                h = stage.upsample(h)

        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return torch.tanh(h) if self.tanh_out else h
569
+
570
+
571
class SimpleDecoder(nn.Module):
    """Minimal decoder: 1x1 conv, three ResNet blocks (widen then narrow),
    1x1 conv, 2x upsample, and a normalized 3x3 output projection."""

    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([
            nn.Conv2d(in_channels, in_channels, 1),
            ResnetBlock(in_channels=in_channels,
                        out_channels=2 * in_channels,
                        temb_channels=0, dropout=0.0),
            ResnetBlock(in_channels=2 * in_channels,
                        out_channels=4 * in_channels,
                        temb_channels=0, dropout=0.0),
            ResnetBlock(in_channels=4 * in_channels,
                        out_channels=2 * in_channels,
                        temb_channels=0, dropout=0.0),
            nn.Conv2d(2 * in_channels, in_channels, 1),
            Upsample(in_channels, with_conv=True),
        ])
        # output head
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels, out_channels,
                                        kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # indices 1..3 are ResnetBlocks, which take an (unused) temb argument
        for idx, layer in enumerate(self.model):
            x = layer(x, None) if idx in (1, 2, 3) else layer(x)

        h = nonlinearity(self.norm_out(x))
        return self.conv_out(h)
605
+
606
+
607
class UpsampleDecoder(nn.Module):
    """Decoder that alternates ResNet stacks with 2x upsampling stages,
    finishing with a normalized 3x3 projection to `out_channels`."""

    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        self.temb_ch = 0  # no timestep conditioning
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        width = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for lvl in range(self.num_resolutions):
            stage = []
            width_out = ch * ch_mult[lvl]
            for _ in range(self.num_res_blocks + 1):
                stage.append(ResnetBlock(in_channels=width,
                                         out_channels=width_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                width = width_out
            self.res_blocks.append(nn.ModuleList(stage))
            if lvl != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(width, True))
                curr_res = curr_res * 2

        # output head
        self.norm_out = Normalize(width)
        self.conv_out = torch.nn.Conv2d(width, out_channels,
                                        kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # upsample stage index equals the level index, so a single loop
        # variable suffices (the original tracked both via enumerate)
        h = x
        for lvl in range(self.num_resolutions):
            for rb in self.res_blocks[lvl]:
                h = rb(h, None)
            if lvl != self.num_resolutions - 1:
                h = self.upsample_blocks[lvl](h)
        h = nonlinearity(self.norm_out(h))
        return self.conv_out(h)
653
+
654
+
655
class LatentRescaler(nn.Module):
    """Rescales a latent by `factor`: ResNet stack, nearest-size interpolate,
    attention, another ResNet stack, then a 1x1 channel projection."""

    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
        super().__init__()
        # residual block, interpolate, residual block
        self.factor = factor
        self.conv_in = nn.Conv2d(in_channels, mid_channels,
                                 kernel_size=3, stride=1, padding=1)

        def _res():
            return ResnetBlock(in_channels=mid_channels,
                               out_channels=mid_channels,
                               temb_channels=0, dropout=0.0)

        self.res_block1 = nn.ModuleList([_res() for _ in range(depth)])
        self.attn = AttnBlock(mid_channels)
        self.res_block2 = nn.ModuleList([_res() for _ in range(depth)])
        self.conv_out = nn.Conv2d(mid_channels, out_channels, kernel_size=1)

    def forward(self, x):
        x = self.conv_in(x)
        for rb in self.res_block1:
            x = rb(x, None)
        # resize spatial dims by self.factor (rounded to nearest int)
        target = (int(round(x.shape[2] * self.factor)),
                  int(round(x.shape[3] * self.factor)))
        x = torch.nn.functional.interpolate(x, size=target)
        x = self.attn(x)
        for rb in self.res_block2:
            x = rb(x, None)
        return self.conv_out(x)
690
+
691
+
692
class MergedRescaleEncoder(nn.Module):
    """An `Encoder` followed by a `LatentRescaler`, fused into one module."""

    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        intermediate_chn = ch * ch_mult[-1]  # encoder's deepest width
        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks,
                               ch=ch, ch_mult=ch_mult, z_channels=intermediate_chn,
                               double_z=False, resolution=resolution,
                               attn_resolutions=attn_resolutions, dropout=dropout,
                               resamp_with_conv=resamp_with_conv, out_ch=None)
        self.rescaler = LatentRescaler(factor=rescale_factor,
                                       in_channels=intermediate_chn,
                                       mid_channels=intermediate_chn,
                                       out_channels=out_ch,
                                       depth=rescale_module_depth)

    def forward(self, x):
        return self.rescaler(self.encoder(x))
709
+
710
+
711
class MergedRescaleDecoder(nn.Module):
    """A `LatentRescaler` followed by a `Decoder`, fused into one module."""

    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        tmp_chn = z_channels * ch_mult[-1]  # width handed from rescaler to decoder
        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn,
                               attn_resolutions=attn_resolutions, dropout=dropout,
                               resamp_with_conv=resamp_with_conv, in_channels=None,
                               num_res_blocks=num_res_blocks, ch_mult=ch_mult,
                               resolution=resolution, ch=ch)
        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels,
                                       mid_channels=tmp_chn, out_channels=tmp_chn,
                                       depth=rescale_module_depth)

    def forward(self, x):
        return self.decoder(self.rescaler(x))
726
+
727
+
728
class Upsampler(nn.Module):
    """A `LatentRescaler` + `Decoder` chain that upsamples a latent from
    `in_size` to `out_size` (must be >= `in_size`)."""

    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
        super().__init__()
        assert out_size >= in_size
        num_blocks = int(np.log2(out_size//in_size)) + 1
        factor_up = 1. + (out_size % in_size)
        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels,
                                       mid_channels=2 * in_channels,
                                       out_channels=in_channels)
        self.decoder = Decoder(out_ch=out_channels, resolution=out_size,
                               z_channels=in_channels, num_res_blocks=2,
                               attn_resolutions=[], in_channels=None, ch=in_channels,
                               ch_mult=[ch_mult for _ in range(num_blocks)])

    def forward(self, x):
        return self.decoder(self.rescaler(x))
745
+
746
+
747
class Resize(nn.Module):
    """Scale-factor resize via `torch.nn.functional.interpolate`.

    A learned (convolutional) variant is declared but not implemented:
    constructing with `learned=True` raises NotImplementedError.

    :param in_channels: only required for the (unimplemented) learned path.
    :param learned: if True, raise NotImplementedError.
    :param mode: interpolation mode passed to `interpolate`.
    """

    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            # fix: the original read `self.__class__.__name` (missing the
            # trailing underscores), which raised AttributeError inside the
            # f-string before the intended NotImplementedError could surface.
            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
            raise NotImplementedError()
            # dead code below preserved from the original learned-resize sketch
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)

    def forward(self, x, scale_factor=1.0):
        """Return `x` unchanged for scale_factor == 1.0, else interpolate."""
        if scale_factor == 1.0:
            return x
        return torch.nn.functional.interpolate(x, mode=self.mode,
                                               align_corners=False,
                                               scale_factor=scale_factor)
769
+
770
class FirstStagePostProcessor(nn.Module):
    """Encodes inputs with a frozen first-stage model, projects the latents,
    and pushes them through a small ResNet + downsample stack.

    Exactly one of `pretrained_model` / `pretrained_config` must be given;
    the config path builds and freezes the model via
    `instantiate_from_config`.
    """

    def __init__(self, ch_mult: list, in_channels,
                 pretrained_model: nn.Module = None,
                 reshape=False,
                 n_channels=None,
                 dropout=0.,
                 pretrained_config=None):
        super().__init__()
        if pretrained_config is None:
            assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.pretrained_model = pretrained_model
        else:
            self.instantiate_pretrained(pretrained_config)

        self.do_reshape = reshape

        if n_channels is None:
            # default to the pretrained encoder's base channel count
            n_channels = self.pretrained_model.encoder.ch

        self.proj_norm = Normalize(in_channels, num_groups=in_channels//2)
        self.proj = nn.Conv2d(in_channels, n_channels, kernel_size=3,
                              stride=1, padding=1)

        blocks, downs = [], []
        ch_in = n_channels
        for m in ch_mult:
            blocks.append(ResnetBlock(in_channels=ch_in, out_channels=m*n_channels, dropout=dropout))
            ch_in = m * n_channels
            downs.append(Downsample(ch_in, with_conv=False))

        self.model = nn.ModuleList(blocks)
        self.downsampler = nn.ModuleList(downs)

    def instantiate_pretrained(self, config):
        """Build the first-stage model from `config` and freeze it."""
        model = instantiate_from_config(config)
        self.pretrained_model = model.eval()
        for param in self.pretrained_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def encode_with_pretrained(self, x):
        """Encode `x`; collapse a DiagonalGaussianDistribution to its mode."""
        c = self.pretrained_model.encode(x)
        if isinstance(c, DiagonalGaussianDistribution):
            c = c.mode()
        return c

    def forward(self, x):
        z_fs = self.encode_with_pretrained(x)
        z = nonlinearity(self.proj(self.proj_norm(z_fs)))

        for submodel, downmodel in zip(self.model, self.downsampler):
            z = submodel(z, temb=None)
            z = downmodel(z)

        if self.do_reshape:
            z = rearrange(z, 'b c h w -> b (h w) c')
        return z
835
+
3DTopia/ldm/modules/diffusionmodules/openaimodel.py ADDED
@@ -0,0 +1,965 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from functools import partial
3
+ import math
4
+ from typing import Iterable
5
+
6
+ import numpy as np
7
+ import torch as th
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from ldm.modules.diffusionmodules.util import (
12
+ checkpoint,
13
+ conv_nd,
14
+ linear,
15
+ avg_pool_nd,
16
+ zero_module,
17
+ normalization,
18
+ timestep_embedding,
19
+ )
20
+ from ldm.modules.attention import SpatialTransformer
21
+
22
+
23
+ # dummy replace
24
def convert_module_to_f16(x):
    """No-op placeholder ("dummy replace") for float16 module conversion."""
    pass


def convert_module_to_f32(x):
    """No-op placeholder ("dummy replace") for float32 module conversion."""
    pass
29
+
30
+
31
+ ## go
32
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    Pools an NCHW feature map to a single vector: the spatial mean is
    prepended as a query token and attended over all positions.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c = x.shape[0], x.shape[1]
        flat = x.reshape(b, c, -1)  # NC(HW)
        # prepend the spatial mean as an extra token -> NC(HW+1)
        tokens = th.cat([flat.mean(dim=-1, keepdim=True), flat], dim=-1)
        tokens = tokens + self.positional_embedding[None, :, :].to(tokens.dtype)
        out = self.c_proj(self.attention(self.qkv_proj(tokens)))
        return out[:, :, 0]  # keep only the pooled (mean) token
60
+
61
+
62
class TimestepBlock(nn.Module):
    """
    Interface for modules whose forward() takes timestep embeddings as a
    second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
72
+
73
+
74
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    Sequential container that routes extra inputs to the children that need
    them: `emb` goes to TimestepBlocks, `context` to SpatialTransformers,
    everything else receives only `x`.
    """

    def forward(self, x, emb, context=None):
        for module in self:
            if isinstance(module, TimestepBlock):
                x = module(x, emb)
            elif isinstance(module, SpatialTransformer):
                x = module(x, context)
            else:
                x = module(x)
        return x
89
+
90
+
91
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep the depth axis, double height and width only
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
120
+
121
class TransposedUpsample(nn.Module):
    'Learned 2x upsampling without padding'

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # stride-2 transposed conv performs the learned 2x upsample
        self.up = nn.ConvTranspose2d(self.channels, self.out_channels,
                                     kernel_size=ks, stride=2)

    def forward(self, x):
        return self.up(x)
132
+
133
+
134
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)  # 3D keeps the depth axis
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # average pooling cannot change the channel count
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
161
+
162
+
163
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # scale-shift norm needs twice the width (scale and shift halves)
        emb_out_width = 2 * self.out_channels if use_scale_shift_norm else self.out_channels
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(emb_channels, emb_out_width),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # zero-init so the block starts as an identity residual
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            # resample between the norm/act and the conv of in_layers,
            # applying the same resampling to the skip path
            pre, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_conv(self.h_upd(pre(x)))
            x = self.x_upd(x)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]  # broadcast over spatial dims
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_rest(out_norm(h) * (1 + scale) + shift)
        else:
            h = self.out_layers(h + emb_out)
        return self.skip_connection(x) + h
276
+
277
+
278
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        # the two implementations differ only in whether heads or qkv are
        # split first
        attn_cls = QKVAttention if use_new_attention_order else QKVAttentionLegacy
        self.attention = attn_cls(self.num_heads)

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # NOTE(review): checkpointing is hard-enabled here (the flag passed is
        # True, ignoring self.use_checkpoint) — preserved from the original.
        # TODO: check checkpoint usage / fix the .half call
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        b, c, *spatial = x.shape
        flat = x.reshape(b, c, -1)
        h = self.proj_out(self.attention(self.qkv(self.norm(flat))))
        return (flat + h).reshape(b, c, *spatial)
325
+
326
+
327
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # Two matmuls with identical op counts: one for the attention-weight
    # matrix, one for combining the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])
345
+
346
+
347
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    (heads are split before q/k/v).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # fold heads into the batch dim, then split q/k/v per head
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        # scale q and k symmetrically: more stable in f16 than dividing after
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v)
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
377
+
378
+
379
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order
    (q/k/v first, then heads).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # split q/k/v first, fold heads into the batch dim afterwards
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        # scale q and k symmetrically: more stable in f16 than dividing after
        logits = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v.reshape(bs * self.n_heads, ch, length))
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
411
+
412
+
413
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
        increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            # Fix: use isinstance() for the type check instead of comparing
            # type objects with `==` (handles subclasses, idiomatic).
            if isinstance(context_dim, ListConfig):
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, ch, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, ch, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            # Skip connection: concatenate the matching encoder feature map.
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
749
class EncoderUNetModel(nn.Module):
    """
    The half (encoder-only) UNet model with attention and timestep embedding.
    For usage, see UNet.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
        *args,
        **kwargs
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        # Stem: a single conv, then the downsampling trunk built below.
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between resolution levels (except after the last).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        self.pool = pool
        # Output head depends on the chosen pooling strategy.
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # Spatial pooling collects per-block mean features.
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, dim=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
3DTopia/ldm/modules/diffusionmodules/triplane_3daware_unet.py ADDED
@@ -0,0 +1,991 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from functools import partial
3
+ import math
4
+ from typing import Iterable
5
+
6
+ import numpy as np
7
+ import torch as th
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from ldm.modules.diffusionmodules.util import (
12
+ checkpoint,
13
+ conv_nd,
14
+ linear,
15
+ avg_pool_nd,
16
+ zero_module,
17
+ normalization,
18
+ timestep_embedding,
19
+ )
20
+ from ldm.modules.attention import SpatialTransformer
21
+
22
+
23
# Dummy replacements: precision conversion is a no-op in this variant.
def convert_module_to_f16(x):
    """No-op stand-in for the fp16 module converter."""
    pass


def convert_module_to_f32(x):
    """No-op stand-in for the fp32 module converter."""
    pass
29
+
30
+
31
+ ## go
32
class AttentionPool2d(nn.Module):
    """
    Attention-based 2D pooling.
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One extra position for the prepended mean token.
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        h = x.reshape(b, c, -1)                                   # NC(HW)
        h = th.cat([h.mean(dim=-1, keepdim=True), h], dim=-1)     # NC(HW+1)
        h = h + self.positional_embedding[None, :, :].to(h.dtype) # NC(HW+1)
        h = self.qkv_proj(h)
        h = self.attention(h)
        h = self.c_proj(h)
        # Return the pooled (mean-token) output only.
        return h[:, :, 0]
61
+
62
class TimestepBlock(nn.Module):
    """
    Any module whose forward() takes timestep embeddings as a second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
73
+
74
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that forwards the timestep embedding (and optional
    cross-attention context) only to children that accept them.
    """

    def forward(self, x, emb, context=None):
        for module in self:
            if isinstance(module, TimestepBlock):
                x = module(x, emb)
            elif isinstance(module, SpatialTransformer):
                x = module(x, context)
            else:
                x = module(x)
        return x
90
+
91
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(
                dims, self.channels, self.out_channels, 3, padding=padding
            )

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep depth fixed, double only the inner two dimensions.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
121
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling without padding (transposed convolution)."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.up = nn.ConvTranspose2d(
            self.channels, self.out_channels, kernel_size=ks, stride=2
        )

    def forward(self, x):
        return self.up(x)
133
+
134
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep their depth dimension; only H and W are strided.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3,
                stride=stride, padding=padding,
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
162
+
163
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels,
    with 3D-aware triplane feature mixing before the output convolution.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # Note: the output conv takes 3x channels because to3daware()
        # concatenates two aggregated planes onto each plane.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels * 3, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def to3daware(self, triplane):
        """
        Enrich each of the three planes (concatenated along the last axis,
        each assumed square res x res) with axis-averaged features from the
        other two planes, tripling the channel count.
        """
        res = triplane.shape[-2]
        plane1 = triplane[..., :res]
        plane2 = triplane[..., res:2 * res]
        plane3 = triplane[..., 2 * res:3 * res]

        x_mp = th.nn.AvgPool2d((res, 1))
        y_mp = th.nn.AvgPool2d((1, res))
        x_mp_rep = lambda i: x_mp(i).repeat(1, 1, res, 1).permute(0, 1, 3, 2)
        y_mp_rep = lambda i: y_mp(i).repeat(1, 1, 1, res).permute(0, 1, 3, 2)

        # Plane 1: combine with row-pooled plane 2 and flipped col-pooled plane 3.
        plane21 = x_mp_rep(plane2)
        plane31 = th.flip(y_mp_rep(plane3), (3,))
        new_plane1 = th.cat([plane1, plane21, plane31], 1)
        # Plane 2: combine with col-pooled plane 1 and row-pooled plane 3.
        plane12 = y_mp_rep(plane1)
        plane32 = x_mp_rep(plane3)
        new_plane2 = th.cat([plane2, plane12, plane32], 1)
        # Plane 3: combine with flipped row-pooled plane 1 and col-pooled plane 2.
        plane13 = th.flip(x_mp_rep(plane1), (2,))
        plane23 = y_mp_rep(plane2)
        new_plane3 = th.cat([plane3, plane13, plane23], 1)

        return th.cat([new_plane1, new_plane2, new_plane3], -1).contiguous()

    def _forward(self, x, emb):
        if self.updown:
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = self.to3daware(h)
            h = out_rest(h)
        else:
            h = h + emb_out
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            h = self.to3daware(out_norm(h))
            h = out_rest(h)
        return self.skip_connection(x) + h
307
+
308
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # Fix: honor self.use_checkpoint instead of the hard-coded True
        # (the original carried a TODO acknowledging the flag was wrong);
        # this matches the upstream guided-diffusion implementation and
        # avoids unconditional activation recomputation in backward.
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)
356
+
357
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # Two matmuls with the same op count: one builds the attention weight
    # matrix, the other combines the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])
376
+
377
class QKVAttentionLegacy(nn.Module):
    """
    QKV attention matching the legacy head-first layout: heads are split
    before the qkv channels are separated.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(batch * self.n_heads, head_dim * 3, length).split(
            head_dim, dim=1
        )
        # Pre-scale q and k: more stable with f16 than dividing afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        weight = th.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v)
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
408
+
409
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order
    (q/k/v are split before the heads).
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        # Symmetric scaling of q and k: more stable with f16 than dividing
        # the logits afterwards.
        scale = 1 / math.sqrt(math.sqrt(ch))
        q = (q * scale).reshape(bs * self.n_heads, ch, length)
        k = (k * scale).reshape(bs * self.n_heads, ch, length)
        weight = th.einsum("bct,bcs->bts", q, k)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return a.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
441
+
442
+
443
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
    ):
        super().__init__()
        # Cross-attention conditioning requires both the spatial transformer
        # and a context dimension; either one without the other is a misconfig.
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        # Exactly one of num_heads / num_head_channels must be given (-1 means unset).
        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        # Sinusoidal timestep features (model_channels wide) are projected
        # up to a 4x-wide embedding shared by every ResBlock.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        # input_block_chans records the channel count after every encoder
        # stage so the decoder can wire up skip connections in reverse.
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsample rate, doubled at each Downsample
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    # Resolve heads vs. per-head channels for this width.
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (skipped after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        # Bottleneck: ResBlock -> attention -> ResBlock at the lowest resolution.
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # Decoder: mirrors the encoder; each ResBlock consumes the matching
        # encoder skip (hence ch + ich input channels).
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                if level and i == num_res_blocks:
                    # Upsample at the end of each level except the outermost.
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection back to out_channels; zero-initialized so the
        # network starts as (near) identity on the residual path.
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
            normalization(ch),
            conv_nd(dims, model_channels, n_embed, 1),
            #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []  # encoder activations saved for skip connections
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            # Class conditioning is added to the timestep embedding.
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            # Concatenate the matching encoder skip along channels.
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
773
+
774
+
775
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.
    Only the encoder (downsampling) path plus a pooling head; there is no
    decoder and no skip-connection consumption.
    For usage, see UNet.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
        *args,
        **kwargs
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # Timestep embedding: 4x the base width, as in the full UNet.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsample rate
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (skipped after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # Pooling head: collapses the spatial map to an [N x out_channels]
        # vector; variant selected by `pool`.
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # "spatial" pooling concatenates per-stage mean features.
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
991
+
3DTopia/ldm/modules/diffusionmodules/triplane_context_crossattention_unet.py ADDED
@@ -0,0 +1,1126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from functools import partial
3
+ import math
4
+ from typing import Iterable
5
+
6
+ import numpy as np
7
+ import torch as th
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from torch import nn, einsum
11
+ from einops import rearrange, repeat
12
+ from inspect import isfunction
13
+
14
+ from ldm.modules.diffusionmodules.util import (
15
+ checkpoint,
16
+ conv_nd,
17
+ linear,
18
+ avg_pool_nd,
19
+ zero_module,
20
+ normalization,
21
+ timestep_embedding,
22
+ )
23
+ from ldm.modules.attention import SpatialTransformer
24
+
25
+
26
+ # dummy replace
27
def convert_module_to_f16(x):
    # Intentional no-op ("dummy replace"): fp16 conversion is disabled in
    # this fork; kept so convert_to_fp16() callers keep working.
    pass
29
+
30
def convert_module_to_f32(x):
    # Intentional no-op ("dummy replace"): mirrors convert_module_to_f16.
    pass
32
+
33
+
34
+ ## go
35
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py

    Pools an [N x C x H x W] feature map to an [N x output_dim] vector by
    prepending a mean token and attending over all HW+1 positions, then
    returning the attended mean-token output.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One positional embedding per spatial position plus the mean token.
        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        # Prepend the spatial mean as a "summary" token.
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        # Position 0 is the mean token: it is the pooled output.
        return x[:, :, 0]
63
+
64
+
65
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Marker base class used by TimestepEmbedSequential to decide which
    children receive the timestep embedding.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
75
+
76
+
77
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb, context=None):
        # Per-layer dispatch: timestep-conditioned blocks receive `emb`,
        # triplane attention blocks receive the cross-attention `context`,
        # everything else is called on `x` alone.
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            elif isinstance(layer, TriplaneAttentionBlock):
                x = layer(x, context)
            else:
                x = layer(x)
        return x
92
+
93
+
94
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep depth, double only the two inner spatial dimensions.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
123
+
124
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling without padding."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # Stride-2 transposed conv learns the 2x upsampling kernel.
        self.up = nn.ConvTranspose2d(
            self.channels, self.out_channels, kernel_size=ks, stride=2
        )

    def forward(self, x):
        return self.up(x)
135
+
136
+
137
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep the depth dimension; only the inner two are strided.
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
164
+
165
+
166
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        # Resampling is applied to both the residual branch (h_upd) and the
        # skip branch (x_upd) so their shapes stay aligned.
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Projects the timestep embedding; twice the width when scale-shift
        # (FiLM-style) conditioning is used, since it splits into scale+shift.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Zero-initialized so the block starts as an identity residual.
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )


    def _forward(self, x, emb):
        if self.updown:
            # When resampling, the final conv must run after the resample, so
            # split in_layers around it and resample both branches.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all trailing spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM: normalize, then modulate with (1 + scale) and shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
279
+
280
def exists(val):
    # True iff val is not None (falsy values like 0 or "" still "exist").
    return val is not None
282
+
283
def default(val, d):
    # Return `val` when it is not None; otherwise fall back to `d`,
    # calling it first if it is a function (lazy default).
    if val is not None:
        return val
    return d() if isfunction(d) else d
287
+
288
class CrossAttention(nn.Module):
    """
    Multi-head (cross-)attention over channel-first sequences.

    :param query_dim: channel dim of the query input `x`.
    :param context_dim: channel dim of the context; defaults to query_dim.
    :param heads: number of attention heads.
    :param dim_head: per-head width; inner width is heads * dim_head.
    :param dropout: dropout on the output projection.

    Inputs and outputs are [B x C x N] (channel-first); both `x` and
    `context` are transposed to token-first internally.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        """
        :param x: [B x query_dim x N] query features.
        :param context: [B x context_dim x M] context features, or None for
            self-attention on `x`.
        :param mask: optional boolean mask over context tokens.
        :return: [B x query_dim x N]
        """
        h = self.heads

        x = x.permute(0, 2, 1)
        # Bug fix: the original called context.permute unconditionally, which
        # raised AttributeError for context=None even though the signature
        # and the default(context, x) below clearly intend a self-attention
        # fallback in that case.
        if context is not None:
            context = context.permute(0, 2, 1)

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -th.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out).permute(0, 2, 1)
333
+
334
class CrossAttentionContext(nn.Module):
    """
    Cross-attention where `x` is channel-first [B x C x N] but `context` is
    used as-is (presumably already token-first [B x M x context_dim], e.g.
    text-encoder features — TODO confirm against callers); only `x` is
    transposed internally.

    :param query_dim: channel dim of the query input `x`.
    :param context_dim: feature dim of the context; defaults to query_dim.
    :param heads: number of attention heads.
    :param dim_head: per-head width; inner width is heads * dim_head.
    :param dropout: dropout on the output projection.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        """
        :param x: [B x query_dim x N] query features (channel-first).
        :param context: conditioning tokens; self-attention on `x` if None.
        :param mask: optional boolean mask over context tokens.
        :return: [B x query_dim x N]
        """
        h = self.heads

        x = x.permute(0, 2, 1)

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            # Bug fix: was `torch.finfo`, but this module imports torch as
            # `th`, so any call with a mask raised NameError.
            max_neg_value = -th.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out).permute(0, 2, 1)
381
+
382
+
383
class TriplaneAttentionBlock(nn.Module):
    """Attention block for triplane feature maps.

    The input is three feature planes concatenated along the last spatial
    axis (width = 3 * height).  Each plane cross-attends to the full set of
    triplane tokens, the three results are re-concatenated, cross-attended
    against the external conditioning ``context``, and added back residually.
    """

    def __init__(
        self,
        channels,
        context_channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels != -1:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        else:
            self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint

        # one normalization per plane query, one for the shared key/value
        # tokens, and one ahead of the context attention
        self.norm1 = normalization(channels)
        self.norm2 = normalization(channels)
        self.norm3 = normalization(channels)
        self.norm4 = normalization(channels)
        self.norm5 = normalization(channels)

        self.plane1_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane2_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane3_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)

        self.context_ca = CrossAttentionContext(channels, context_channels, self.num_heads, num_head_channels)

    def forward(self, x, context):
        # NOTE(review): checkpointing is forced on (flag True) regardless of
        # self.use_checkpoint, matching the upstream TODO — preserved as-is.
        return checkpoint(self._forward, (x, context), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!

    def _forward(self, x, context):
        b, c, *spatial = x.shape
        res = x.shape[-2]
        # split the concatenated triplane into its three square planes
        planes = [
            x[..., i * res:(i + 1) * res].reshape(b, c, -1) for i in range(3)
        ]
        flat = x.reshape(b, c, -1)

        kv = self.norm4(flat)
        plane_outs = [
            self.plane1_ca(self.norm1(planes[0]), kv),
            self.plane2_ca(self.norm2(planes[1]), kv),
            self.plane3_ca(self.norm3(planes[2]), kv),
        ]

        h = th.cat(plane_outs, -1)
        h = self.context_ca(self.norm5(h), context=context)

        return (flat + h).reshape(b, c, *spatial)
444
+
445
+
446
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels != -1:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        else:
            self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # a single 1x1 conv produces q, k and v stacked along channels
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        # new order splits qkv before heads; legacy splits heads first
        self.attention = (
            QKVAttention(self.num_heads)
            if use_new_attention_order
            else QKVAttentionLegacy(self.num_heads)
        )

        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!

    def _forward(self, x):
        b, c, *spatial = x.shape
        flat = x.reshape(b, c, -1)
        h = self.proj_out(self.attention(self.qkv(self.norm(flat))))
        # residual connection, restored to the original spatial shape
        return (flat + h).reshape(b, c, *spatial)
493
+
494
+
495
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, channels, *spatial_dims = y[0].shape
    tokens = int(np.prod(spatial_dims))
    # Two matmuls of equal cost: the attention-score matrix and the
    # weighted combination of values — each b * tokens^2 * c operations.
    model.total_ops += th.DoubleTensor([2 * batch * tokens * tokens * channels])
513
+
514
+
515
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # legacy layout: heads are folded into the batch first, then q/k/v split
        heads_first = qkv.reshape(bs * self.n_heads, ch * 3, length)
        q, k, v = heads_first.split(ch, dim=1)
        # split the 1/sqrt(ch) scaling across q and k for f16 stability
        scale = 1 / math.sqrt(math.sqrt(ch))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        attn = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", attn, v)
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
545
+
546
+
547
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # new layout: q/k/v are split first, heads folded into batch afterwards
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        q_heads = (q * scale).view(bs * self.n_heads, ch, length)
        k_heads = (k * scale).view(bs * self.n_heads, ch, length)
        logits = th.einsum("bct,bcs->bts", q_heads, k_heads)
        attn = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", attn, v.reshape(bs * self.n_heads, ch, length))
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
578
+ return count_flops_attn(model, _x, y)
579
+
580
+
581
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        # NOTE(review): this assert is inherited from the upstream
        # SpatialTransformer-based UNet, but in this file context_dim is
        # consumed by TriplaneAttentionBlock, not the spatial transformer —
        # confirm callers always set use_spatial_transformer when passing
        # context_dim.
        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        # MLP that lifts the sinusoidal timestep embedding to 4x width
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        # ---- downsampling trunk ----
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsample rate
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        TriplaneAttentionBlock(
                            ch,
                            context_dim,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # downsample between levels (except after the last level)
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ---- bottleneck ----
        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            TriplaneAttentionBlock(
                ch,
                context_dim,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # ---- upsampling path (mirrors the trunk, consuming skip channels) ----
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        TriplaneAttentionBlock(
                            ch,
                            context_dim,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # final norm + zero-initialized projection to out_channels
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, ch, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, ch, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        # down path, stashing activations for skip connections
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        # up path, concatenating the matching skip activation each step
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
908
+
909
+
910
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.
    For usage, see UNet.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
        *args,
        **kwargs
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # MLP that lifts the sinusoidal timestep embedding to 4x width
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        # ---- downsampling trunk (encoder half only; no up path) ----
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current downsample rate
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # downsample between levels (except after the last level)
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ---- bottleneck ----
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # ---- pooling head: reduce the feature map to an [N x out_channels] vector ----
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            # consumes per-block mean features concatenated in forward()
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # spatial pooling collects a mean feature from every block
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
1126
+
3DTopia/ldm/modules/diffusionmodules/triplane_crossattention_unet.py ADDED
@@ -0,0 +1,1058 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ from functools import partial
3
+ import math
4
+ from typing import Iterable
5
+
6
+ import numpy as np
7
+ import torch as th
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from torch import nn, einsum
11
+ from einops import rearrange, repeat
12
+ from inspect import isfunction
13
+
14
+ from ldm.modules.diffusionmodules.util import (
15
+ checkpoint,
16
+ conv_nd,
17
+ linear,
18
+ avg_pool_nd,
19
+ zero_module,
20
+ normalization,
21
+ timestep_embedding,
22
+ )
23
+ from ldm.modules.attention import SpatialTransformer
24
+
25
+
26
# dummy replace
def convert_module_to_f16(x):
    """No-op placeholder; fp16 conversion is intentionally disabled here."""
    return None


def convert_module_to_f32(x):
    """No-op placeholder; fp32 conversion is intentionally disabled here."""
    return None
32
+
33
+
34
+ ## go
35
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # learned positional embedding for the HW tokens plus the mean token
        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c = x.shape[:2]
        flat = x.reshape(b, c, -1)  # NC(HW)
        # prepend the spatial mean as a pooling token: NC(HW+1)
        tokens = th.cat([flat.mean(dim=-1, keepdim=True), flat], dim=-1)
        tokens = tokens + self.positional_embedding[None, :, :].to(tokens.dtype)
        pooled = self.c_proj(self.attention(self.qkv_proj(tokens)))
        # return only the attended pooling token
        return pooled[:, :, 0]
63
+
64
+
65
class TimestepBlock(nn.Module):
    """
    Abstract base for modules whose forward() additionally consumes
    timestep embeddings as a second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
75
+
76
+
77
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that routes the extra inputs each child needs:
    timestep embeddings to TimestepBlocks, conditioning context to
    SpatialTransformers, and nothing extra to everything else.
    """

    def forward(self, x, emb, context=None):
        out = x
        for module in self:
            if isinstance(module, TimestepBlock):
                out = module(out, emb)
            elif isinstance(module, SpatialTransformer):
                out = module(out, context)
            else:
                out = module(out)
        return out
92
+
93
+
94
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep the depth axis, double only the inner two dims
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
123
+
124
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling without padding (transposed convolution)."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # stride-2 transposed conv doubles spatial extent (plus kernel overhang)
        self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2)

    def forward(self, x):
        return self.up(x)
135
+
136
+
137
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep their depth axis; only the inner two dims shrink
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # average pooling cannot change the channel count
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
164
+
165
+
166
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        # norm -> SiLU -> conv; the conv already maps channels -> out_channels
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        # When resampling, both the residual branch (h_upd) and the skip
        # branch (x_upd) are resampled with parameter-free layers so the
        # residual addition stays shape-consistent.
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Projects the timestep embedding; twice the width when the
        # embedding is split into (scale, shift) for FiLM-style conditioning.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # Final conv is zero-initialized so the block starts as an identity
        # mapping plus skip.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        # Skip connection: identity when channel counts match, otherwise a
        # 3x3 (use_conv) or 1x1 projection.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )


    def _forward(self, x, emb):
        if self.updown:
            # Split in_layers so the resampling happens between the
            # norm/activation and the convolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM: normalize, then modulate with (1 + scale) and shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
279
+
280
def exists(val):
    """Return True when *val* is not None (any other value counts as present)."""
    return not (val is None)
282
+
283
def default(val, d):
    """Return *val* unless it is None; otherwise fall back to *d*.

    A plain-function fallback *d* is invoked lazily, so expensive defaults
    are only constructed when actually needed.
    """
    if val is not None:
        return val
    return d() if isfunction(d) else d
287
+
288
class CrossAttention(nn.Module):
    """
    Multi-head cross-attention over channel-first sequences.

    Unlike the stock latent-diffusion CrossAttention, inputs here are
    channel-first ([B, C, N]); they are permuted to [B, N, C] internally
    and the result is permuted back before returning.

    :param query_dim: channel width of the query input.
    :param context_dim: channel width of the context; defaults to query_dim.
    :param heads: number of attention heads.
    :param dim_head: per-head width (inner dim is heads * dim_head).
    :param dropout: dropout rate applied after the output projection.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        """
        :param x: [B, C, N] query features (channel-first).
        :param context: optional [B, C_ctx, M] context; None means
            self-attention on x.
        :param mask: optional boolean mask over context positions.
        :return: [B, C, N] attended features (channel-first).
        """
        h = self.heads

        # [B, C, N] -> [B, N, C]
        x = x.permute(0, 2, 1)
        # Bug fix: the original permuted `context` *before* applying the
        # None-fallback (`default(context, x)`), so self-attention with
        # context=None crashed on `None.permute`. Check for None first;
        # x is already permuted, so it can be used directly.
        if context is None:
            context = x
        else:
            context = context.permute(0, 2, 1)

        q = self.to_q(x)
        k = self.to_k(context)
        v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -th.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out).permute(0, 2, 1)
333
+
334
+
335
class TriplaneAttentionBlock(nn.Module):
    """
    Cross-plane attention for triplane feature maps.

    The input is assumed to be three square feature planes concatenated
    along the last (width) axis, i.e. x is [B, C, res, 3*res] — see
    _forward, which slices the width into three res-wide planes. Each
    plane cross-attends to the flattened concatenation of all three
    planes; the three outputs are re-concatenated and added residually.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)

        # NOTE(review): positional args bind num_head_channels to
        # CrossAttention's dim_head; if num_head_channels were -1 the inner
        # dim would be negative — callers appear to always pass a computed
        # positive dim_head here. Confirm against call sites.
        self.plane1_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane2_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane3_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)

    def forward(self, x):
        # Checkpoint flag is hard-coded True here (not self.use_checkpoint),
        # matching the original TODO about checkpoint usage.
        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        #return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        b, c, *spatial = x.shape
        # res is the plane height; the width axis holds 3 planes of size res.
        res = x.shape[-2]
        plane1 = x[..., :res].reshape(b, c, -1)
        plane2 = x[..., res:res*2].reshape(b, c, -1)
        plane3 = x[..., 2*res:3*res].reshape(b, c, -1)
        x = x.reshape(b, c, -1)

        # Each plane queries the normalized concatenation of all planes.
        plane1_output = self.plane1_ca(self.norm(plane1), self.norm(x))
        plane2_output = self.plane2_ca(self.norm(plane2), self.norm(x))
        plane3_output = self.plane3_ca(self.norm(plane3), self.norm(x))

        h = th.cat([plane1_output, plane2_output, plane3_output], -1)

        # Residual connection, restored to the original spatial layout.
        return (x + h).reshape(b, c, *spatial)
379
+
380
+
381
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels != -1:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        else:
            self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # A single 1x1 conv emits q, k and v in one pass.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        # New order splits qkv before heads; legacy splits heads first.
        self.attention = (
            QKVAttention(self.num_heads)
            if use_new_attention_order
            else QKVAttentionLegacy(self.num_heads)
        )
        # Zero-init projection: the block starts as an identity mapping.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # Checkpoint flag is hard-coded True (not self.use_checkpoint),
        # matching the original TODO about checkpoint usage.
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        b, c, *spatial = x.shape
        flat = x.reshape(b, c, -1)
        h = self.proj_out(self.attention(self.qkv(self.norm(flat))))
        return (flat + h).reshape(b, c, *spatial)
428
+
429
+
430
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, chans, *spatial_dims = y[0].shape
    tokens = int(np.prod(spatial_dims))
    # Two matmuls of equal cost: the QK^T weight matrix, then weight @ V.
    model.total_ops += th.DoubleTensor([2 * batch * (tokens ** 2) * chans])
448
+
449
+
450
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_ch = width // (3 * self.n_heads)
        # Legacy layout: heads are folded into the batch dim *before* the
        # qkv split.
        q, k, v = qkv.reshape(batch * self.n_heads, head_ch * 3, length).split(head_ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(head_ch))
        # Scale q and k separately before the matmul — more stable in fp16
        # than dividing the product afterwards.
        weight = th.einsum("bct,bcs->bts", q * scale, k * scale)
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v)
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
480
+
481
+
482
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_ch = width // (3 * self.n_heads)
        # New layout: split into q/k/v first, fold heads into batch after.
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(head_ch))
        # Scale q and k separately before the matmul — more stable in fp16
        # than dividing the product afterwards.
        weight = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(batch * self.n_heads, head_ch, length),
            (k * scale).view(batch * self.n_heads, head_ch, length),
        )
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v.reshape(batch * self.n_heads, head_ch, length))
        return out.reshape(batch, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
514
+
515
+
516
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.

    NOTE(review): this variant uses TriplaneAttentionBlock instead of the
    stock AttentionBlock/SpatialTransformer at attention resolutions, so
    inputs are presumably triplane feature maps ([B, C, res, 3*res]) —
    confirm against callers.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        # Timestep embedding MLP: model_channels -> 4 * model_channels.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        # ---- downsampling trunk ----
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current cumulative downsample factor
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    # Resolve heads/head-width from whichever of
                    # num_heads / num_head_channels was configured.
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        TriplaneAttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (except after the last level).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ---- middle block ----
        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            TriplaneAttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # ---- upsampling trunk (mirrors the downsampling trunk) ----
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                # Skip-connection channels from the matching input block.
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        TriplaneAttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection; zero-initialized so the untrained net outputs zeros.
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, ch, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, ch, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        # Down path: record every intermediate for the skip connections.
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        # Up path: concatenate the matching skip activation at each step.
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
840
+
841
+
842
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.
    For usage, see UNet.

    Only the downsampling trunk and middle block are kept; a pooling head
    (`pool`) reduces the features to an [N x out_channels] output.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
        *args,
        **kwargs
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # Timestep embedding MLP: model_channels -> 4 * model_channels.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        # ---- downsampling trunk (same structure as UNetModel's) ----
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1  # current cumulative downsample factor
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    # Uses the plain AttentionBlock here (not the triplane one).
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # ---- pooling head ----
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            # "spatial" pools operate on spatial means collected at every
            # depth (see forward), hence the _feature_size input width.
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # Collect per-depth spatial means for the "spatial" heads.
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
1058
+
3DTopia/ldm/modules/diffusionmodules/util.py ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # adopted from
2
+ # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
3
+ # and
4
+ # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
5
+ # and
6
+ # https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
7
+ #
8
+ # thanks!
9
+
10
+
11
+ import os
12
+ import math
13
+ import torch
14
+ import torch.nn as nn
15
+ import numpy as np
16
+ from einops import repeat
17
+
18
+ from ldm.util import instantiate_from_config
19
+
20
def force_zero_snr(betas):
    """Rescale a beta schedule so the terminal step has (near-)zero SNR.

    sqrt(alpha_bar) is shifted so its last entry becomes ~0 (minus a 1e-6
    epsilon) and scaled so its first entry is preserved; per-step betas are
    then recovered from the adjusted cumulative products.
    """
    cum_alpha = torch.cumprod(1 - betas, dim=0)
    sqrt_cum = cum_alpha ** (1/2)
    first = sqrt_cum[0].clone()
    last = sqrt_cum[-1].clone() - 1e-6
    # Shift-then-scale, exactly as in the original in-place formulation.
    sqrt_cum -= last
    sqrt_cum *= first / (first - last)
    cum_alpha = sqrt_cum ** 2
    step_alphas = torch.cat([cum_alpha[0:1], cum_alpha[1:] / cum_alpha[:-1]], 0)
    return 1 - step_alphas
33
+
34
def shift_schedule(base_betas, shift_scale):
    """Shift a noise schedule in SNR space by a factor of 1/shift_scale**2.

    The cumulative alpha product is converted to SNR, scaled, and mapped back
    to per-step betas via ab = snr / (1 + snr).
    """
    cum = torch.cumprod(1 - base_betas, dim=0)
    snr = cum / (1 - cum)
    scaled_snr = snr * ((1 / shift_scale) ** 2)
    new_cum = scaled_snr / (1 + scaled_snr)
    ratios = new_cum[1:] / new_cum[:-1]
    new_alphas = torch.cat([new_cum[0:1], ratios], 0)
    return 1 - new_alphas
44
+
45
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, shift_scale=None):
    """Build a diffusion beta schedule and return it as a float64 numpy array.

    :param schedule: one of "linear", "cosine", "sqrt_linear", "sqrt",
        "linear_force_zero_snr", "linear_100".
    :param n_timestep: number of diffusion steps to generate.
    :param linear_start: first beta for the linear-family schedules.
    :param linear_end: last beta for the linear-family schedules.
    :param cosine_s: small offset used by the cosine schedule.
    :param shift_scale: if given, the schedule is shifted in SNR space via
        shift_schedule before being returned.
    :raises ValueError: for an unknown schedule name.
    """
    if schedule == "linear":
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )

    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Bugfix: np.clip on a torch tensor returns an ndarray, which made the
        # final `betas.numpy()` call crash for the cosine schedule. Clamp with
        # torch so `betas` stays a tensor.
        betas = torch.clamp(betas, min=0, max=0.999)

    elif schedule == "sqrt_linear":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == "sqrt":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    elif schedule == 'linear_force_zero_snr':
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
        betas = force_zero_snr(betas)
    elif schedule == 'linear_100':
        # Linear schedule truncated to its first 100 steps.
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
        betas = betas[:100]
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")

    if shift_scale is not None:
        betas = shift_schedule(betas, shift_scale)

    return betas.numpy()
82
+
83
+
84
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
    """Select the subset of DDPM timesteps used by the DDIM sampler.

    "uniform" strides evenly through the DDPM steps; "quad" spaces them
    quadratically (denser near t=0).
    """
    if ddim_discr_method == 'uniform':
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, stride)))
    elif ddim_discr_method == 'quad':
        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
    else:
        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')

    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f'Selected timesteps for ddim sampler: {steps_out}')
    return steps_out
99
+
100
+
101
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Compute the (sigma, alpha, alpha_prev) arrays used by DDIM sampling.

    sigmas follow eq. (16) of https://arxiv.org/abs/2010.02502; eta=0 gives
    deterministic DDIM.
    """
    # Alphas at the selected steps, and the corresponding previous-step alphas.
    alphas = alphacums[ddim_timesteps]
    prev_values = [alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()
    alphas_prev = np.asarray(prev_values)

    variance_ratio = (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
    sigmas = eta * np.sqrt(variance_ratio)
    if verbose:
        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
        print(f'For the chosen value of eta, which is {eta}, '
              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
    return sigmas, alphas, alphas_prev
113
+
114
+
115
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
    return np.array([
        min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
        for i in range(steps)
    ])
132
+
133
+
134
def extract_into_tensor(a, t, x_shape):
    """Gather per-timestep values from `a` at indices `t` and reshape them
    to (batch, 1, 1, ...) so they broadcast against a tensor of `x_shape`."""
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    trailing_ones = (1,) * (len(x_shape) - 1)
    return gathered.reshape(batch, *trailing_ones)
138
+
139
+
140
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    packed = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *packed)
155
+
156
+
157
class CheckpointFunction(torch.autograd.Function):
    """Gradient checkpointing: the forward pass runs under no_grad and the
    activations are recomputed during backward instead of being stored."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # The first `length` args are differentiable inputs; the remainder are
        # parameters run_function closes over (needed for autograd.grad below).
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])

        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Recompute the forward pass with grad enabled to rebuild the graph.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The two leading Nones are the gradients for `run_function`/`length`.
        return (None, None) + input_grads
187
+
188
+
189
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if repeat_only:
        # Broadcast the raw timestep value across the embedding dimension.
        return repeat(timesteps, 'b -> b d', d=dim)
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(angles), torch.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad with a zero column so the output is exactly [N x dim].
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb
210
+
211
+
212
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    # detach() avoids tracking the in-place zeroing in autograd.
    for param in module.parameters():
        param.detach().zero_()
    return module
219
+
220
+
221
def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    # detach() avoids tracking the in-place scaling in autograd.
    for param in module.parameters():
        param.detach().mul_(scale)
    return module
228
+
229
+
230
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    non_batch_dims = list(range(1, len(tensor.shape)))
    return tensor.mean(dim=non_batch_dims)
235
+
236
+
237
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # GroupNorm with 32 groups, computed in fp32 (see GroupNorm32).
    return GroupNorm32(32, channels)
244
+
245
+
246
+ # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
247
class SiLU(nn.Module):
    """Sigmoid-weighted linear unit (kept because PyTorch 1.5 lacks nn.SiLU)."""

    def forward(self, x):
        gate = torch.sigmoid(x)
        return gate * x
250
+
251
+
252
class GroupNorm32(nn.GroupNorm):
    """GroupNorm evaluated in float32, with the result cast back to the
    input's dtype (keeps normalization stable under mixed precision)."""

    def forward(self, x):
        normed = super().forward(x.float())
        return normed.type(x.dtype)
255
+
256
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    factories = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dims not in factories:
        raise ValueError(f"unsupported dimensions: {dims}")
    return factories[dims](*args, **kwargs)
267
+
268
+
269
def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    # Thin alias for nn.Linear, kept for API symmetry with conv_nd/avg_pool_nd.
    return nn.Linear(*args, **kwargs)
274
+
275
+
276
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    factories = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    if dims not in factories:
        raise ValueError(f"unsupported dimensions: {dims}")
    return factories[dims](*args, **kwargs)
287
+
288
+
289
class HybridConditioner(nn.Module):
    """Pairs a concat-conditioning encoder with a cross-attention encoder,
    both built via instantiate_from_config (presumably OmegaConf-style
    configs — confirm against ldm.util.instantiate_from_config)."""

    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        c_concat = self.concat_conditioner(c_concat)
        c_crossattn = self.crossattn_conditioner(c_crossattn)
        # Single-element lists match the conditioning-dict format consumed by
        # the DDPM wrappers in this codebase.
        return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
300
+
301
+
302
def noise_like(shape, device, repeat=False):
    """Sample standard-normal noise of `shape`; with repeat=True a single
    sample is drawn and tiled across the batch dimension."""
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
3DTopia/ldm/modules/distributions/__init__.py ADDED
File without changes
3DTopia/ldm/modules/distributions/distributions.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
class AbstractDistribution:
    """Minimal distribution interface: subclasses provide sample() and mode()."""

    def sample(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()
11
+
12
+
13
class DiracDistribution(AbstractDistribution):
    """Degenerate distribution with all probability mass at `value`."""

    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value
22
+
23
+
24
class DiagonalGaussianDistribution(object):
    """Diagonal-covariance Gaussian parameterized by [mean, logvar] halves of
    `parameters` chunked along dim=1.

    logvar is clamped to [-30, 20] for numerical stability. With
    deterministic=True, std/var are zeroed so sample() returns the mean and
    kl()/nll() are exactly zero.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)

    def sample(self):
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            # KL against the standard normal N(0, I).
            return 0.5 * torch.sum(torch.pow(self.mean, 2)
                                   + self.var - 1.0 - self.logvar,
                                   dim=[1, 2, 3])
        # KL between two diagonal Gaussians.
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=[1, 2, 3])

    def nll(self, sample, dims=[1,2,3]):
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean
63
+
64
+
65
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # At least one argument must already be a tensor so we know the target
    # device/dtype for promoting any scalar log-variances.
    tensor = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, torch.Tensor)),
        None,
    )
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = (
        lv if isinstance(lv, torch.Tensor) else torch.tensor(lv).to(tensor)
        for lv in (logvar1, logvar2)
    )

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
3DTopia/ldm/modules/ema.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
class LitEma(nn.Module):
    """Exponential moving average of a model's trainable parameters.

    Shadow copies are stored as buffers (so they persist in checkpoints).
    With use_num_upates=True the effective decay is warmed up as
    min(decay, (1 + n) / (10 + n)) over the first updates.
    """

    def __init__(self, model, decay=0.9999, use_num_upates=True):
        super().__init__()
        if decay < 0.0 or decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        # Maps parameter name -> buffer name (dots stripped, see below).
        self.m_name2s_name = {}
        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
        # num_updates == -1 disables the decay warm-up entirely.
        self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
                             else torch.tensor(-1,dtype=torch.int))

        for name, p in model.named_parameters():
            if p.requires_grad:
                #remove as '.'-character is not allowed in buffers
                s_name = name.replace('.','')
                self.m_name2s_name.update({name:s_name})
                self.register_buffer(s_name,p.clone().detach().data)

        # Scratch storage used by store()/restore().
        self.collected_params = []

    def forward(self,model):
        # Move each shadow parameter toward the live parameter:
        # shadow -= (1 - decay) * (shadow - param).
        decay = self.decay

        if self.num_updates >= 0:
            self.num_updates += 1
            decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))

        one_minus_decay = 1.0 - decay

        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())

            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
                else:
                    assert not key in self.m_name2s_name

    def copy_to(self, model):
        # Overwrite the model's trainable parameters with the EMA values.
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert not key in self.m_name2s_name

    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
3DTopia/ldm/modules/encoders/__init__.py ADDED
File without changes
3DTopia/ldm/modules/encoders/modules.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from functools import partial
4
+ import clip
5
+ from einops import rearrange, repeat
6
+ from transformers import CLIPTokenizer, CLIPTextModel
7
+ import kornia
8
+
9
+ from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
10
+
11
+
12
class AbstractEncoder(nn.Module):
    """Base class for conditioning encoders; subclasses implement encode()."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError
18
+
19
+
20
+
21
class ClassEmbedder(nn.Module):
    """Embed integer class labels for use as cross-attention conditioning."""

    def __init__(self, embed_dim, n_classes=1000, key='class'):
        super().__init__()
        self.key = key
        self.embedding = nn.Embedding(n_classes, embed_dim)

    def forward(self, batch, key=None):
        lookup_key = self.key if key is None else key
        # this is for use in crossattn
        # Insert a length-1 sequence axis: [B] -> [B, 1] -> [B, 1, embed_dim].
        labels = batch[lookup_key][:, None]
        return self.embedding(labels)
34
+
35
+
36
class TransformerEmbedder(AbstractEncoder):
    """Some transformer encoder layers"""
    def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
        super().__init__()
        self.device = device
        # x-transformers wrapper; returns token embeddings rather than logits.
        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
                                              attn_layers=Encoder(dim=n_embed, depth=n_layer))

    def forward(self, tokens):
        # tokens: integer ids, presumably [B, T] — produced by a tokenizer upstream.
        tokens = tokens.to(self.device)  # meh
        z = self.transformer(tokens, return_embeddings=True)
        return z

    def encode(self, x):
        return self(x)
51
+
52
+
53
class BERTTokenizer(AbstractEncoder):
    """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
    def __init__(self, device="cuda", vq_interface=True, max_length=77):
        super().__init__()
        from transformers import BertTokenizerFast  # TODO: add to reuquirements
        self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
        self.device = device
        # vq_interface=True makes encode() mimic the (quant, diff, info)
        # return shape of VQ encoders so callers can treat them uniformly.
        self.vq_interface = vq_interface
        self.max_length = max_length

    def forward(self, text):
        # Pad/truncate every string to exactly max_length token ids.
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        return tokens

    @torch.no_grad()
    def encode(self, text):
        tokens = self(text)
        if not self.vq_interface:
            return tokens
        # Tokens take the slot of the VQ indices in the info tuple.
        return None, None, [None, None, tokens]

    def decode(self, text):
        # Identity: decoding is not meaningful for a tokenizer-only encoder.
        return text
78
+
79
+
80
class BERTEmbedder(AbstractEncoder):
    """Uses the BERT tokenizr model and add some transformer encoder layers"""
    def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
                 device="cuda",use_tokenizer=True, embedding_dropout=0.0):
        super().__init__()
        self.use_tknz_fn = use_tokenizer
        if self.use_tknz_fn:
            # Tokenize raw strings internally; otherwise callers pass token ids.
            self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
        self.device = device
        self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
                                              attn_layers=Encoder(dim=n_embed, depth=n_layer),
                                              emb_dropout=embedding_dropout)

    def forward(self, text):
        if self.use_tknz_fn:
            tokens = self.tknz_fn(text)#.to(self.device)
        else:
            tokens = text
        z = self.transformer(tokens, return_embeddings=True)
        return z

    def encode(self, text):
        # output of length 77
        return self(text)
104
+
105
+
106
class SpatialRescaler(nn.Module):
    """Rescale feature maps by `multiplier`, applied `n_stages` times, with an
    optional 1x1 convolution to remap the channel count afterwards."""

    def __init__(self,
                 n_stages=1,
                 method='bilinear',
                 multiplier=0.5,
                 in_channels=3,
                 out_channels=None,
                 bias=False):
        super().__init__()
        self.n_stages = n_stages
        assert self.n_stages >= 0
        assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
        self.multiplier = multiplier
        # Bind the interpolation mode once so forward() only supplies the scale.
        self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
        self.remap_output = out_channels is not None
        if self.remap_output:
            print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
            self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)

    def forward(self, x):
        for _ in range(self.n_stages):
            x = self.interpolator(x, scale_factor=self.multiplier)
        if self.remap_output:
            x = self.channel_mapper(x)
        return x

    def encode(self, x):
        return self(x)
136
+
137
class FrozenCLIPEmbedder(AbstractEncoder):
    """Uses the CLIP transformer encoder for text (from Hugging Face)"""
    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        # Weights are frozen at construction; this module is inference-only.
        self.freeze()

    def freeze(self):
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        # Pad/truncate to max_length so every batch row has the same length.
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        outputs = self.transformer(input_ids=tokens)

        # Per-token hidden states (not the pooled output) are used as
        # conditioning context.
        z = outputs.last_hidden_state
        return z

    def encode(self, text):
        return self(text)
163
+
164
+ import hashlib
165
+ import os
166
+ import urllib
167
+ import warnings
168
+ from typing import Any, Union, List
169
+ from pkg_resources import packaging
170
+
171
+ import torch
172
+ from PIL import Image
173
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
174
+ from tqdm import tqdm
175
+
176
+ from clip.simple_tokenizer import SimpleTokenizer as _Tokenizer
177
+
178
+ try:
179
+ from torchvision.transforms import InterpolationMode
180
+ BICUBIC = InterpolationMode.BICUBIC
181
+ except ImportError:
182
+ BICUBIC = Image.BICUBIC
183
+
184
+
185
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
186
+ warnings.warn("PyTorch version 1.7.1 or higher is recommended")
187
+
188
+
189
+ __all__ = ["available_models", "load", "tokenize"]
190
+ _tokenizer = _Tokenizer()
191
+
192
def tokenize_with_truncation(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize

    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    # Zero-padded result; unused positions stay 0.
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            if truncate:
                # Keep the end-of-text marker as the final token when truncating.
                tokens = tokens[:context_length]
                tokens[-1] = eot_token
            else:
                raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
229
+
230
class FrozenCLIPTextEmbedder(nn.Module):
    """
    Uses the CLIP transformer encoder for text.
    """
    def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
        super().__init__()
        # Loaded on CPU here; presumably moved to `device` by the caller
        # (note freeze() is never called in __init__ — confirm intended).
        self.model, _ = clip.load(version, jit=False, device="cpu")
        self.device = device
        self.max_length = max_length
        self.n_repeat = n_repeat
        self.normalize = normalize

    def freeze(self):
        self.model = self.model.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        # tokens = clip.tokenize(text).to(self.device)
        # Module-level helper that truncates over-long prompts instead of raising.
        tokens = tokenize_with_truncation(text, truncate=True).to(self.device)
        z = self.model.encode_text(tokens)
        if self.normalize:
            # L2-normalize each embedding vector.
            z = z / torch.linalg.norm(z, dim=1, keepdim=True)
        return z

    def encode(self, text):
        z = self(text)
        if z.ndim==2:
            # [B, D] -> [B, 1, D] so the output can serve as a context sequence.
            z = z[:, None, :]
        z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
        return z
261
+
262
+
263
class FrozenClipImageEmbedder(nn.Module):
    """
    Uses the CLIP image encoder.
    """
    def __init__(
            self,
            model='ViT-L/14',
            jit=False,
            device='cuda' if torch.cuda.is_available() else 'cpu',
            antialias=False,
        ):
        super().__init__()
        # self.model, _ = clip.load(name=model, device=device, jit=jit)
        # NOTE(review): the `jit` parameter is accepted but no longer forwarded.
        self.model, _ = clip.load(name=model, device=device)

        self.antialias = antialias

        # CLIP's ImageNet-style normalization constants; persistent=False keeps
        # them out of state_dict.
        self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
        self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)

    def preprocess(self, x):
        # normalize to [0,1]
        x = kornia.geometry.resize(x, (224, 224),
                                   interpolation='bicubic',align_corners=True,
                                   antialias=self.antialias)
        # x = (x + 1.) / 2.
        # NOTE(review): the [-1,1] -> [0,1] remap is commented out, so inputs
        # are presumably already in [0,1] despite the forward() comment below.
        # renormalize according to clip
        x = kornia.enhance.normalize(x, self.mean, self.std)

        return x

    def forward(self, x):
        # x is assumed to be in range [-1,1]
        z = self.model.encode_image(self.preprocess(x))
        if z.ndim==2:
            # [B, D] -> [B, 1, D] so the embedding acts as a length-1 context.
            z = z[:, None, :]
        return z
300
+
301
+
302
+ ############### OPENCLIP #################
303
+ import open_clip
304
+
305
class OpenClipTextEmbedder(nn.Module):
    """Text embedder using an OpenCLIP model's text tower."""

    def __init__(
        self,
        model='ViT-bigG-14',
        pretrained='laion2b_s39b_b160k',
        device='cuda' if torch.cuda.is_available() else 'cpu',
        normalize=True,):
        super().__init__()
        # Built on CPU; presumably moved to `device` by the caller.
        self.model, _, _ = open_clip.create_model_and_transforms(model, pretrained=pretrained, device='cpu')
        self.tokenizer = open_clip.get_tokenizer(model)
        self.normalize = normalize
        self.device = device

    def freeze(self):
        self.model = self.model.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        tok_text = self.tokenizer(text).to(self.device)
        z = self.model.encode_text(tok_text)
        if self.normalize:
            # L2-normalize each embedding vector.
            z = z / torch.linalg.norm(z, dim=1, keepdim=True)
        return z

    def encode(self, text):
        z = self(text)
        if z.ndim==2:
            # [B, D] -> [B, 1, D]; k=1 repeat keeps parity with
            # FrozenCLIPTextEmbedder.encode.
            z = z[:, None, :]
        z = repeat(z, 'b 1 d -> b k d', k=1)
        return z
336
+
337
class OpenClipImageEmbedder(nn.Module):
    """Image embedder using an OpenCLIP model's vision tower.

    :param model: OpenCLIP architecture name.
    :param pretrained: pretrained-weights tag passed to open_clip.
    :param device: device the model is created on.
    :param antialias: whether the 224x224 resize uses antialiasing
        (new, defaults to False — the previous effective behavior was a crash).
    """

    def __init__(
        self,
        model='ViT-bigG-14',
        pretrained='laion2b_s39b_b160k',
        device='cuda' if torch.cuda.is_available() else 'cpu',
        antialias=False,
    ):
        super().__init__()
        self.model, _, _ = open_clip.create_model_and_transforms(model, pretrained=pretrained, device=device)
        # Bugfix: preprocess() reads self.antialias, but it was never assigned,
        # so every forward pass raised AttributeError.
        self.antialias = antialias
        # CLIP normalization constants; persistent=False keeps them out of
        # state_dict.
        self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
        self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)

    def preprocess(self, x):
        # Resize to CLIP's expected 224x224 input, then normalize per channel.
        x = kornia.geometry.resize(x, (224, 224),
                                   interpolation='bicubic',align_corners=True,
                                   antialias=self.antialias)
        x = kornia.enhance.normalize(x, self.mean, self.std)
        return x

    def forward(self, x):
        z = self.model.encode_image(self.preprocess(x))
        if z.ndim==2:
            # [B, D] -> [B, 1, D] so the embedding acts as a length-1 context.
            z = z[:, None, :]
        return z
361
+
362
class DinoV2(nn.Module):
    """DINOv2 backbone loaded from a local torch.hub checkout, returning
    normalized patch tokens as image features."""

    def __init__(self, model='dinov2_vitb14', ckpt='dino_ckpt/dinov2_vitb14_pretrain.pth'):
        super().__init__()
        # device='cuda' if torch.cuda.is_available() else 'cpu'
        # self.model = torch.hub.load('facebookresearch/dinov2', model)
        # Loads from a local 'dinov2' hub directory with weights from `ckpt`
        # (avoids a network fetch).
        self.model = torch.hub.load('dinov2', model, source='local', pretrained=False)
        self.model.load_state_dict(torch.load(ckpt))
        # self.model = self.model.to(device)
        # Standard ImageNet normalization constants.
        self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]), persistent=False)
        self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]), persistent=False)

    def preprocess(self, x):
        x = kornia.geometry.resize(x, (224, 224),
                                   interpolation='bicubic',align_corners=True,
                                   antialias=False)
        x = kornia.enhance.normalize(x, self.mean, self.std)
        return x

    def forward(self, x):
        # Patch tokens only (no CLS), as produced by DINOv2's forward_features.
        return self.model.forward_features(self.preprocess(x))['x_norm_patchtokens']
382
+
383
+ if __name__ == "__main__":
384
+ from ldm.util import count_params
385
+ model = FrozenCLIPEmbedder()
386
+ count_params(model, verbose=True)
3DTopia/ldm/modules/image_degradation/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
1
+ from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
2
+ from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
3DTopia/ldm/modules/image_degradation/bsrgan.py ADDED
@@ -0,0 +1,730 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ # --------------------------------------------
4
+ # Super-Resolution
5
+ # --------------------------------------------
6
+ #
7
+ # Kai Zhang (cskaizhang@gmail.com)
8
+ # https://github.com/cszn
9
+ # From 2019/03--2021/08
10
+ # --------------------------------------------
11
+ """
12
+
13
+ import numpy as np
14
+ import cv2
15
+ import torch
16
+
17
+ from functools import partial
18
+ import random
19
+ from scipy import ndimage
20
+ import scipy
21
+ import scipy.stats as ss
22
+ from scipy.interpolate import interp2d
23
+ from scipy.linalg import orth
24
+ import albumentations
25
+
26
+ import ldm.modules.image_degradation.utils_image as util
27
+
28
+
29
def modcrop_np(img, sf):
    '''Crop a numpy image so its two leading dimensions are divisible by sf.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped copy of the image (input is left untouched)
    '''
    d0, d1 = img.shape[0], img.shape[1]
    cropped = np.copy(img)[:d0 - d0 % sf, :d1 - d1 % sf, ...]
    return cropped
40
+
41
+
42
+ """
43
+ # --------------------------------------------
44
+ # anisotropic Gaussian kernels
45
+ # --------------------------------------------
46
+ """
47
+
48
+
49
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    n = k.shape[0]
    # The upsampled (un-cropped) kernel is (3n-2) x (3n-2).
    big = np.zeros((3 * n - 2, 3 * n - 2))
    # Superpose a scaled copy of k at every stride-2 offset.
    for r in range(n):
        for c in range(n):
            big[2 * r:2 * r + n, 2 * c:2 * c + n] += k[r, c] * k
    # Trim negligible borders (half the small-kernel size) to speed up SR.
    margin = n // 2
    trimmed = big[margin:-margin, margin:-margin]
    # Renormalize to unit mass after cropping.
    return trimmed / trimmed.sum()
63
+
64
+
65
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1 : [0.1,50], scaling of eigenvalues
        l2 : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.
    Returns:
        k : kernel
    """
    # Rotate the unit x-vector by theta to obtain the principal axis.
    direction = np.array([[np.cos(theta), -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]]) @ np.array([1., 0.])
    # Orthonormal eigenbasis built from the principal axis.
    basis = np.array([[direction[0], direction[1]],
                      [direction[1], -direction[0]]])
    eigvals = np.array([[l1, 0], [0, l2]])
    # Covariance = V D V^-1.
    cov = basis @ eigvals @ np.linalg.inv(basis)
    return gm_blur_kernel(mean=[0, 0], cov=cov, size=ksize)
84
+
85
+
86
def gm_blur_kernel(mean, cov, size=15):
    """Sample a normalized Gaussian blur kernel on a size x size grid.

    The density of N(mean, cov) is evaluated at pixel offsets centered on the
    kernel, then normalized to sum to 1.
    """
    center = size / 2.0 + 0.5
    kernel = np.zeros([size, size])
    for row in range(size):
        for col in range(size):
            # (cx, cy) offset of this pixel from the kernel center.
            offset = [col - center + 1, row - center + 1]
            kernel[row, col] = ss.multivariate_normal.pdf(offset, mean=mean, cov=cov)
    return kernel / np.sum(kernel)
97
+
98
+
99
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    # Sub-pixel shift required to re-center content for scale factor sf.
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    # Clamp sample positions to the valid grid to avoid extrapolation.
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # recent SciPy releases — this function requires an older SciPy; verify
    # the pinned environment.
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        # Interpolate each channel independently, writing back in place.
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)

    return x
126
+
127
+
128
def blur(x, k):
    '''Per-sample, per-channel 2D convolution with replicate padding.

    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    '''
    batch, channels = x.shape[:2]
    pad_h, pad_w = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    padded = torch.nn.functional.pad(x, pad=(pad_h, pad_w, pad_h, pad_w), mode='replicate')
    # One filter per (sample, channel) pair, then a single grouped conv over
    # the flattened batch dimension.
    weights = k.repeat(1, channels, 1, 1).view(-1, 1, k.shape[2], k.shape[3])
    flat = padded.view(1, -1, padded.shape[2], padded.shape[3])
    out = torch.nn.functional.conv2d(flat, weights, bias=None, stride=1, padding=0,
                                     groups=batch * channels)
    return out.view(batch, channels, out.shape[2], out.shape[3])
143
+
144
+
145
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    """Sample a random (possibly noisy) anisotropic Gaussian SR kernel.

    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    # Kai Zhang
    # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
    # max_var = 2.5 * sf
    """
    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi  # random rotation of the covariance
    # Per-pixel multiplicative noise, uniform in [-noise_level, noise_level].
    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2

    # Set COV matrix using Lambdas and Theta
    LAMBDA = np.diag([lambda_1, lambda_2])
    Q = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    SIGMA = Q @ LAMBDA @ Q.T
    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]

    # Set expectation position (shifting kernel for aligned image)
    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
    MU = MU[None, None, :, None]

    # Create meshgrid for Gaussian
    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
    Z = np.stack([X, Y], 2)[:, :, :, None]

    # Calculate the Gaussian density for every pixel of the kernel.
    ZZ = Z - MU
    ZZ_t = ZZ.transpose(0, 1, 3, 2)
    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)

    # shift the kernel so it will be centered
    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)

    # Normalize the kernel and return
    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
    kernel = raw_kernel / np.sum(raw_kernel)
    return kernel
185
+
186
+
187
def fspecial_gaussian(hsize, sigma):
    """Matlab-style fspecial('gaussian'): hsize x hsize kernel with std sigma.

    Returns a kernel normalized to sum to 1 (when its sum is nonzero).
    """
    hsize = [hsize, hsize]
    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
    std = sigma
    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
    arg = -(x * x + y * y) / (2 * std * std)
    h = np.exp(arg)
    # Zero-out numerically negligible entries, as Matlab's fspecial does.
    # Fix: `scipy.finfo` does not exist in modern SciPy (AttributeError at
    # runtime); np.finfo provides the machine epsilon.
    h[h < np.finfo(float).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h = h / sumh
    return h
199
+
200
+
201
def fspecial_laplacian(alpha):
    """Matlab-style fspecial('laplacian') 3x3 kernel; alpha is clamped to [0, 1]."""
    alpha = min(max(alpha, 0), 1)
    corner = alpha / (alpha + 1)
    edge = (1 - alpha) / (alpha + 1)
    return np.array([[corner, edge, corner],
                     [edge, -4 / (alpha + 1), edge],
                     [corner, edge, corner]])
208
+
209
+
210
def fspecial(filter_type, *args, **kwargs):
    '''Matlab-like fspecial dispatcher ('gaussian' or 'laplacian').

    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py

    Raises:
        ValueError: for an unsupported filter_type (previously this returned
        None silently, deferring the crash to the caller).
    '''
    if filter_type == 'gaussian':
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == 'laplacian':
        return fspecial_laplacian(*args, **kwargs)
    raise ValueError(f'unsupported filter type: {filter_type!r}')
219
+
220
+
221
+ """
222
+ # --------------------------------------------
223
+ # degradation models
224
+ # --------------------------------------------
225
+ """
226
+
227
+
228
def bicubic_degradation(x, sf=3):
    '''
    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor
    Return:
        bicubicly downsampled LR image
    '''
    # util.imresize_np is the project's Matlab-compatible bicubic resizer.
    x = util.imresize_np(x, scale=1 / sf)
    return x
238
+
239
+
240
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    # Fix: the scipy.ndimage.filters namespace was deprecated and removed in
    # modern SciPy; ndimage.convolve is the same function.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
260
+
261
+
262
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    # Fix: the scipy.ndimage.filters namespace was deprecated and removed in
    # modern SciPy; ndimage.convolve is the same function.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
282
+
283
+
284
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    '''
    # Fix: the scipy.ndimage.filters namespace was deprecated and removed in
    # modern SciPy; ndimage.convolve is identical. 'wrap' pads periodically.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0  # starting offset of the nearest (strided) downsampling
    return x[st::sf, st::sf, ...]
297
+
298
+
299
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I
    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 0.5.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int): threshold on |I - B|, in 8-bit units, for the mask.
    """
    # cv2.GaussianBlur requires an odd kernel size.
    if radius % 2 == 0:
        radius += 1
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    # Sharpen only where the residual is significant (scaled to 8-bit range).
    mask = np.abs(residual) * 255 > threshold
    mask = mask.astype('float32')
    # Blur the binary mask for a soft transition between regions.
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)

    K = img + weight * residual
    K = np.clip(K, 0, 1)
    return soft_mask * K + (1 - soft_mask) * img
323
+
324
+
325
def add_blur(img, sf=4):
    """Blur `img` with a random Gaussian kernel whose width scales with `sf`.

    50% anisotropic (random orientation/eigenvalues), 50% isotropic; the
    kernel size is odd, sampled from {7, 9, ..., 25}.
    """
    wd2 = 4.0 + sf   # eigenvalue range for the anisotropic case
    wd = 2.0 + 0.2 * sf  # std range for the isotropic case
    if random.random() < 0.5:
        l1 = wd2 * random.random()
        l2 = wd2 * random.random()
        k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
    else:
        k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
    # Fix: the scipy.ndimage.filters namespace was deprecated and removed in
    # modern SciPy; ndimage.convolve is the same function.
    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')

    return img
337
+
338
+
339
def add_resize(img, sf=4):
    """Randomly rescale `img`: 20% chance upscale (1x-2x), 70% chance
    downscale (down to 0.5/sf), otherwise keep the size; interpolation is
    picked at random from cv2 flags 1/2/3.
    """
    rnum = np.random.rand()
    if rnum > 0.8:  # up
        sf1 = random.uniform(1, 2)
    elif rnum < 0.7:  # down
        sf1 = random.uniform(0.5 / sf, 1)
    else:
        sf1 = 1.0
    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
    img = np.clip(img, 0.0, 1.0)

    return img
351
+
352
+
353
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
354
+ # noise_level = random.randint(noise_level1, noise_level2)
355
+ # rnum = np.random.rand()
356
+ # if rnum > 0.6: # add color Gaussian noise
357
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
358
+ # elif rnum < 0.4: # add grayscale Gaussian noise
359
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
360
+ # else: # add noise
361
+ # L = noise_level2 / 255.
362
+ # D = np.diag(np.random.rand(3))
363
+ # U = orth(np.random.rand(3, 3))
364
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
365
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
366
+ # img = np.clip(img, 0.0, 1.0)
367
+ # return img
368
+
369
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    """Add Gaussian noise at a random level in [noise_level1, noise_level2].

    40% per-channel (color) noise, 40% single-channel noise broadcast over
    channels, 20% channel-correlated noise with a random covariance.
    NOTE: uses `img = img + ...` rather than `+=` (see the commented-out
    variant above and the module TODO) so the caller's array is never
    mutated in place.
    """
    noise_level = random.randint(noise_level1, noise_level2)
    rnum = np.random.rand()
    if rnum > 0.6:  # add color Gaussian noise
        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:  # add grayscale Gaussian noise
        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:  # add channel-correlated noise
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img
384
+
385
+
386
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add multiplicative (speckle) noise at a random level.

    40% per-channel noise, 40% single-channel noise broadcast over channels,
    20% channel-correlated noise with a random covariance.
    Fix: replaced in-place `img += ...` with `img = img + ...` — this matches
    add_Gaussian_noise and addresses the module's own TODO about pickling
    (in-place augmentation also risked mutating shared arrays).
    """
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    if rnum > 0.6:
        img = img + img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        img = img + img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img = img + img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img
402
+
403
+
404
def add_Poisson_noise(img):
    """Add Poisson (shot) noise, either to all channels jointly or to the
    luminance only (with the luminance noise broadcast over channels)."""
    # Quantize to 8-bit levels first so Poisson rates correspond to counts.
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    scale = 10 ** (2 * random.random() + 2.0)  # exponent uniform in [2, 4]
    if random.random() < 0.5:
        img = np.random.poisson(img * scale).astype(np.float32) / scale
    else:
        gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
        gray = np.clip((gray * 255.0).round(), 0, 255) / 255.
        delta = np.random.poisson(gray * scale).astype(np.float32) / scale - gray
        img = img + delta[:, :, np.newaxis]
    return np.clip(img, 0.0, 1.0)
416
+
417
+
418
def add_JPEG_noise(img):
    """Simulate JPEG compression at a random quality factor in [30, 95].

    Input/output are float RGB in [0, 1]; the round-trip goes through uint8
    BGR because cv2.imencode/imdecode operate on that representation.
    """
    quality_factor = random.randint(30, 95)
    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    img = cv2.imdecode(encimg, 1)
    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
    return img
425
+
426
+
427
def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Crop a random lq_patchsize x lq_patchsize patch from `lq` together
    with the spatially corresponding (sf-times larger) patch from `hq`."""
    height, width = lq.shape[:2]
    top = random.randint(0, height - lq_patchsize)
    left = random.randint(0, width - lq_patchsize)
    lq = lq[top:top + lq_patchsize, left:left + lq_patchsize, :]

    # Same crop in HR coordinates.
    top_hq, left_hq = int(top * sf), int(left * sf)
    hq = hq[top_hq:top_hq + lq_patchsize * sf, left_hq:left_hq + lq_patchsize * sf, :]
    return lq, hq
436
+
437
+
438
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf  # remember the requested factor; `sf` may be halved below

    h1, w1 = img.shape[:2]
    # NOTE(review): axis 0 is cropped with w1 and axis 1 with h1 (swapped);
    # harmless for square inputs — verify for non-square ones.
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    hq = img.copy()

    # For x4, optionally perform an initial x2 downsample and continue with sf=2.
    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                             interpolation=random.choice([1, 2, 3]))
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    # Apply the 7 degradation stages in random order, but force stage 2
    # (random resize) before stage 3 (resize to target) — downsample3 last.
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            img = add_blur(img, sf=sf)

        elif i == 1:
            img = add_blur(img, sf=sf)

        elif i == 2:
            # `a, b` record the pre-resize size; stage 3 relies on stage 2
            # having run first (guaranteed by the swap above).
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                                 interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)

        elif i == 3:
            # downsample3: resize to the final LR size recorded before stage 2
            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            img = np.clip(img, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)

        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop (uses the original scale factor, not the possibly-halved one)
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
527
+
528
+
529
+ # todo no isp_model?
530
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    image: HXWXC uint8 image (converted to [0, 1] float internally)
    sf: scale factor
    isp_model: camera ISP model (unused — the ISP stage is commented out)
    Returns
    -------
    example: dict with key "image" holding the degraded uint8 LR image
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf  # NOTE(review): unused below (this variant does no random crop)

    h1, w1 = image.shape[:2]
    # NOTE(review): axis 0 cropped with w1, axis 1 with h1 (swapped) — same
    # quirk as degradation_bsrgan; harmless for square inputs.
    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = image.shape[:2]

    hq = image.copy()

    # For x4, optionally perform an initial x2 downsample and continue with sf=2.
    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                               interpolation=random.choice([1, 2, 3]))
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    # Random stage order, but stage 2 (random resize) always precedes
    # stage 3 (resize to target size).
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            image = add_blur(image, sf=sf)

        elif i == 1:
            image = add_blur(image, sf=sf)

        elif i == 2:
            # `a, b` record the pre-resize size for stage 3.
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                                   interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
                image = image[0::sf, 0::sf, ...]  # nearest downsampling
            image = np.clip(image, 0.0, 1.0)

        elif i == 3:
            # downsample3
            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            image = np.clip(image, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)

        # elif i == 6:
        #     # add processed camera sensor noise
        #     if random.random() < isp_prob and isp_model is not None:
        #         with torch.no_grad():
        #             img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    example = {"image":image}
    return example
614
+
615
+
616
+ # TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
617
def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
    """
    This is an extended degradation model by combining
    the degradation models of BSRGAN and Real-ESRGAN
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    shuffle_prob: probability of fully shuffling all 13 degradation stages
    use_sharp: sharpening the img
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """

    h1, w1 = img.shape[:2]
    # NOTE(review): same swapped w1/h1 mod-crop quirk as degradation_bsrgan.
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    if use_sharp:
        img = add_sharpening(img)
    hq = img.copy()

    # Either shuffle all 13 stages, or keep the global order and only
    # locally shuffle the two noise groups (2-5 and 9-12).
    if random.random() < shuffle_prob:
        shuffle_order = random.sample(range(13), 13)
    else:
        shuffle_order = list(range(13))
        # local shuffle for noise, JPEG is always the last one
        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))

    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1

    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_resize(img, sf=sf)
        elif i == 2:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 3:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 4:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 5:
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        elif i == 6:
            img = add_JPEG_noise(img)
        elif i == 7:
            # Second degradation pass (Real-ESRGAN style): blur/resize/noise again.
            img = add_blur(img, sf=sf)
        elif i == 8:
            img = add_resize(img, sf=sf)
        elif i == 9:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 10:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 11:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 12:
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        else:
            print('check the shuffle!')

    # resize to desired size
    img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
                     interpolation=random.choice([1, 2, 3]))

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf, lq_patchsize)

    return img, hq
702
+
703
+
704
if __name__ == '__main__':
    # Visual smoke test: degrade a test image 20 times and save side-by-side
    # comparisons (bicubic LR | BSRGAN LR | HR), each upscaled with nearest.
    print("hey")
    img = util.imread_uint('utils/test.png', 3)
    print(img)
    img = util.uint2single(img)
    print(img)
    img = img[:448, :448]
    h = img.shape[0] // 4
    print("resizing to", h)
    sf = 4
    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
    for i in range(20):
        print(i)
        # Fix: degradation_bsrgan_variant returns a dict with a uint8 image
        # under "image" (the original code treated it as an array and later
        # referenced an undefined `img_hq`, so this demo crashed).
        img_lq = util.uint2single(deg_fn(img)["image"])
        print(img_lq)
        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
        print(img_lq.shape)
        print("bicubic", img_lq_bicubic.shape)
        print(img.shape)
        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                interpolation=0)
        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                        interpolation=0)
        # The HR reference is the (cropped) input image itself.
        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img)], axis=1)
        util.imsave(img_concat, str(i) + '.png')
729
+
730
+
3DTopia/ldm/modules/image_degradation/bsrgan_light.py ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ import numpy as np
3
+ import cv2
4
+ import torch
5
+
6
+ from functools import partial
7
+ import random
8
+ from scipy import ndimage
9
+ import scipy
10
+ import scipy.stats as ss
11
+ from scipy.interpolate import interp2d
12
+ from scipy.linalg import orth
13
+ import albumentations
14
+
15
+ import ldm.modules.image_degradation.utils_image as util
16
+
17
+ """
18
+ # --------------------------------------------
19
+ # Super-Resolution
20
+ # --------------------------------------------
21
+ #
22
+ # Kai Zhang (cskaizhang@gmail.com)
23
+ # https://github.com/cszn
24
+ # From 2019/03--2021/08
25
+ # --------------------------------------------
26
+ """
27
+
28
+
29
def modcrop_np(img, sf):
    '''Crop a numpy image so its two leading dimensions are divisible by sf.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped copy of the image (input is left untouched)
    '''
    d0, d1 = img.shape[0], img.shape[1]
    cropped = np.copy(img)[:d0 - d0 % sf, :d1 - d1 % sf, ...]
    return cropped
40
+
41
+
42
+ """
43
+ # --------------------------------------------
44
+ # anisotropic Gaussian kernels
45
+ # --------------------------------------------
46
+ """
47
+
48
+
49
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    n = k.shape[0]
    # The upsampled (un-cropped) kernel is (3n-2) x (3n-2).
    big = np.zeros((3 * n - 2, 3 * n - 2))
    # Superpose a scaled copy of k at every stride-2 offset.
    for r in range(n):
        for c in range(n):
            big[2 * r:2 * r + n, 2 * c:2 * c + n] += k[r, c] * k
    # Trim negligible borders (half the small-kernel size) to speed up SR.
    margin = n // 2
    trimmed = big[margin:-margin, margin:-margin]
    # Renormalize to unit mass after cropping.
    return trimmed / trimmed.sum()
63
+
64
+
65
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1 : [0.1,50], scaling of eigenvalues
        l2 : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.
    Returns:
        k : kernel
    """
    # Rotate the unit x-vector by theta to obtain the principal axis.
    direction = np.array([[np.cos(theta), -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]]) @ np.array([1., 0.])
    # Orthonormal eigenbasis built from the principal axis.
    basis = np.array([[direction[0], direction[1]],
                      [direction[1], -direction[0]]])
    eigvals = np.array([[l1, 0], [0, l2]])
    # Covariance = V D V^-1.
    cov = basis @ eigvals @ np.linalg.inv(basis)
    return gm_blur_kernel(mean=[0, 0], cov=cov, size=ksize)
84
+
85
+
86
def gm_blur_kernel(mean, cov, size=15):
    """Sample a normalized Gaussian blur kernel on a size x size grid.

    The density of N(mean, cov) is evaluated at pixel offsets centered on the
    kernel, then normalized to sum to 1.
    """
    center = size / 2.0 + 0.5
    kernel = np.zeros([size, size])
    for row in range(size):
        for col in range(size):
            # (cx, cy) offset of this pixel from the kernel center.
            offset = [col - center + 1, row - center + 1]
            kernel[row, col] = ss.multivariate_normal.pdf(offset, mean=mean, cov=cov)
    return kernel / np.sum(kernel)
97
+
98
+
99
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    # Sub-pixel shift required to re-center content for scale factor sf.
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    # Clamp sample positions to the valid grid to avoid extrapolation.
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # recent SciPy releases — this function requires an older SciPy; verify
    # the pinned environment.
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        # Interpolate each channel independently, writing back in place.
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)

    return x
126
+
127
+
128
def blur(x, k):
    '''Apply per-sample blur kernels via a grouped convolution.

    x: images, NxCxHxW
    k: kernels, Nx1xhxw (one kernel per sample, shared across channels)
    '''
    n, c = x.shape[:2]
    pad_a = (k.shape[-2] - 1) // 2
    pad_b = (k.shape[-1] - 1) // 2
    # Replicate-pad so the output keeps the input spatial size.
    x = torch.nn.functional.pad(x, pad=(pad_a, pad_b, pad_a, pad_b), mode='replicate')
    # One kernel per (sample, channel) pair -> groups = n * c.
    k = k.repeat(1, c, 1, 1)
    k = k.view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    return x.view(n, c, x.shape[2], x.shape[3])
143
+
144
+
145
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    """"
    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    # Kai Zhang
    # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
    # max_var = 2.5 * sf
    """
    # Random eigenvalues (lambdas) and rotation angle (theta) for the covariance.
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi  # random rotation
    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2

    # Covariance matrix from eigenvalues + rotation.
    LAMBDA = np.diag([lambda_1, lambda_2])
    Q = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    SIGMA = Q @ LAMBDA @ Q.T
    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]

    # Expected position (shifts the kernel so the image stays aligned).
    MU = (k_size // 2 - 0.5 * (scale_factor - 1))[None, None, :, None]

    # Grid of pixel coordinates for the kernel support.
    grid_x, grid_y = np.meshgrid(range(k_size[0]), range(k_size[1]))
    Z = np.stack([grid_x, grid_y], 2)[:, :, :, None]

    # Evaluate the (unnormalized) Gaussian at every pixel, optionally perturbed by noise.
    ZZ = Z - MU
    ZZ_t = ZZ.transpose(0, 1, 3, 2)
    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)

    # Normalize to sum 1 and return.
    return raw_kernel / np.sum(raw_kernel)
185
+
186
+
187
def fspecial_gaussian(hsize, sigma):
    """MATLAB-style fspecial('gaussian'): normalized hsize x hsize Gaussian kernel."""
    hsize = [hsize, hsize]
    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
    std = sigma
    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
    arg = -(x * x + y * y) / (2 * std * std)
    h = np.exp(arg)
    # Zero out negligible tails. Use np.finfo: the scipy.finfo alias was
    # deprecated and removed from SciPy (>= 1.12), so scipy.finfo crashes there.
    h[h < np.finfo(float).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h = h / sumh
    return h
199
+
200
+
201
def fspecial_laplacian(alpha):
    """MATLAB-style fspecial('laplacian') 3x3 kernel; alpha is clamped to [0, 1]."""
    alpha = min(max(alpha, 0), 1)
    corner = alpha / (alpha + 1)
    edge = (1 - alpha) / (alpha + 1)
    return np.array([[corner, edge, corner],
                     [edge, -4 / (alpha + 1), edge],
                     [corner, edge, corner]])
208
+
209
+
210
def fspecial(filter_type, *args, **kwargs):
    '''
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    '''
    # Dispatch table keeps the supported filter kinds in one place.
    builders = {
        'gaussian': fspecial_gaussian,
        'laplacian': fspecial_laplacian,
    }
    if filter_type in builders:
        return builders[filter_type](*args, **kwargs)
    # Unknown filter types fall through and return None, as before.
219
+
220
+
221
+ """
222
+ # --------------------------------------------
223
+ # degradation models
224
+ # --------------------------------------------
225
+ """
226
+
227
+
228
def bicubic_degradation(x, sf=3):
    '''Bicubically downsample an image.

    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor
    Return:
        bicubicly downsampled LR image
    '''
    return util.imresize_np(x, scale=1 / sf)
238
+
239
+
240
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
260
+
261
+
262
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
282
+
283
+
284
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    '''
    # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0
    return x[st::sf, st::sf, ...]
297
+
298
+
299
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening, borrowed from Real-ESRGAN.
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I
    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 1.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int):
    """
    if radius % 2 == 0:
        radius += 1  # GaussianBlur requires an odd kernel size
    blurred = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blurred
    # Binary mask of pixels whose residual is strong enough to sharpen.
    mask = (np.abs(residual) * 255 > threshold).astype('float32')
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
    sharpened = np.clip(img + weight * residual, 0, 1)
    return soft_mask * sharpened + (1 - soft_mask) * img
323
+
324
+
325
def add_blur(img, sf=4):
    """Blur img with a random anisotropic (p=0.5) or isotropic Gaussian kernel.

    Kernel widths scale with the super-resolution factor sf.
    """
    wd2 = 4.0 + sf
    wd = 2.0 + 0.2 * sf

    wd2 = wd2 / 4
    wd = wd / 4

    if random.random() < 0.5:
        l1 = wd2 * random.random()
        l2 = wd2 * random.random()
        k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
    else:
        k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
    # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')

    return img
341
+
342
+
343
def add_resize(img, sf=4):
    """Randomly rescale img: up (p=0.2), down (p=0.7), or keep the size (p=0.1)."""
    draw = np.random.rand()
    if draw > 0.8:  # upscale
        scale = random.uniform(1, 2)
    elif draw < 0.7:  # downscale
        scale = random.uniform(0.5 / sf, 1)
    else:
        scale = 1.0
    img = cv2.resize(img, (int(scale * img.shape[1]), int(scale * img.shape[0])),
                     interpolation=random.choice([1, 2, 3]))
    return np.clip(img, 0.0, 1.0)
355
+
356
+
357
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
358
+ # noise_level = random.randint(noise_level1, noise_level2)
359
+ # rnum = np.random.rand()
360
+ # if rnum > 0.6: # add color Gaussian noise
361
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
362
+ # elif rnum < 0.4: # add grayscale Gaussian noise
363
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
364
+ # else: # add noise
365
+ # L = noise_level2 / 255.
366
+ # D = np.diag(np.random.rand(3))
367
+ # U = orth(np.random.rand(3, 3))
368
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
369
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
370
+ # img = np.clip(img, 0.0, 1.0)
371
+ # return img
372
+
373
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    """Add random Gaussian noise: per-channel, grayscale, or channel-correlated."""
    noise_level = random.randint(noise_level1, noise_level2)
    rnum = np.random.rand()
    if rnum > 0.6:
        # Independent noise per channel.
        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        # Single-channel noise broadcast across channels.
        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        # Channel-correlated noise with a random covariance matrix.
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        cov = np.dot(np.dot(np.transpose(U), D), U)
        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * cov), img.shape[:2]).astype(np.float32)
    return np.clip(img, 0.0, 1.0)
388
+
389
+
390
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add multiplicative (speckle) noise: per-channel, grayscale, or channel-correlated.

    Uses out-of-place `img = img + ...` instead of `+=`, consistent with
    add_Gaussian_noise (whose in-place variant was already retired in this file);
    this also avoids any accidental mutation of a caller-held array.
    """
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    if rnum > 0.6:
        # Independent noise per channel, scaled by the signal.
        img = img + img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        # Single-channel noise broadcast across channels.
        img = img + img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        # Channel-correlated noise with a random covariance matrix.
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img = img + img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img
406
+
407
+
408
def add_Poisson_noise(img):
    """Add Poisson (shot) noise, either per-channel or on the luminance only."""
    # Quantize to 8-bit levels first so the Poisson rates are well defined.
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = 10 ** (2 * random.random() + 2.0)  # exponent in [2, 4]
    if random.random() < 0.5:
        img = np.random.poisson(img * vals).astype(np.float32) / vals
    else:
        # Noise derived from the gray-scale image, added to all channels.
        gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
        gray = np.clip((gray * 255.0).round(), 0, 255) / 255.
        noise = np.random.poisson(gray * vals).astype(np.float32) / vals - gray
        img = img + noise[:, :, np.newaxis]
    return np.clip(img, 0.0, 1.0)
420
+
421
+
422
def add_JPEG_noise(img):
    """Round-trip img (RGB float, [0, 1]) through JPEG at a random quality in [80, 95]."""
    quality_factor = random.randint(80, 95)
    bgr = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    _, encoded = cv2.imencode('.jpg', bgr, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    decoded = cv2.imdecode(encoded, 1)
    return cv2.cvtColor(util.uint2single(decoded), cv2.COLOR_BGR2RGB)
429
+
430
+
431
def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Crop an aligned random patch pair: lq_patchsize in LQ, lq_patchsize*sf in HQ."""
    h, w = lq.shape[:2]
    top = random.randint(0, h - lq_patchsize)
    left = random.randint(0, w - lq_patchsize)
    lq = lq[top:top + lq_patchsize, left:left + lq_patchsize, :]

    # Corresponding (scaled) window in the high-quality image.
    top_hq, left_hq = int(top * sf), int(left * sf)
    hq = hq[top_hq:top_hq + lq_patchsize * sf, left_hq:left_hq + lq_patchsize * sf, :]
    return lq, hq
440
+
441
+
442
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = img.shape[:2]
    # Mod crop so both sides are multiples of sf. Fixed: the original sliced the
    # height axis by a width-derived amount (and vice versa), which is wrong
    # for non-square images.
    img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    hq = img.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                             interpolation=random.choice([1, 2, 3]))
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            img = add_blur(img, sf=sf)

        elif i == 1:
            img = add_blur(img, sf=sf)

        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                                 interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
                img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)

        elif i == 3:
            # downsample3
            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            img = np.clip(img, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)

        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
531
+
532
+
533
+ # todo no isp_model?
534
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = image.shape[:2]
    # Mod crop so both sides are multiples of sf. Fixed: the original sliced
    # the height axis by a width-derived amount, wrong for non-square images.
    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]
    h, w = image.shape[:2]

    hq = image.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                               interpolation=random.choice([1, 2, 3]))
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:

        if i == 0:
            image = add_blur(image, sf=sf)

        # elif i == 1:
        #     image = add_blur(image, sf=sf)

        # NOTE: upstream deliberately disabled the second blur (i == 1); the
        # dangling `if i == 0: pass` preserves the original control flow, in
        # which the following elif chain never fires for i in {0, 1}.
        if i == 0:
            pass

        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.8:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                                   interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # ndimage.convolve replaces the deprecated/removed ndimage.filters.convolve
                image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
                image = image[0::sf, 0::sf, ...]  # nearest downsampling

            image = np.clip(image, 0.0, 1.0)

        elif i == 3:
            # downsample3
            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            image = np.clip(image, 0.0, 1.0)

        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)

        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)
        #
        # elif i == 6:
        #     # add processed camera sensor noise
        #     if random.random() < isp_prob and isp_model is not None:
        #         with torch.no_grad():
        #             img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    example = {"image": image}
    return example
622
+
623
+
624
+
625
+
626
if __name__ == '__main__':
    # Quick visual sanity check: degrade a test image 20 times and save
    # side-by-side comparisons (bicubic LR | degraded LR | HQ).
    print("hey")
    img = util.imread_uint('utils/test.png', 3)
    img = img[:448, :448]
    h = img.shape[0] // 4
    print("resizing to", h)
    sf = 4
    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
    for idx in range(20):
        print(idx)
        img_hq = img
        img_lq = deg_fn(img)["image"]
        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
        print(img_lq)
        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
        print(img_lq.shape)
        print("bicubic", img_lq_bicubic.shape)
        print(img_hq.shape)
        # Nearest-upscale both LR versions back to HQ size for comparison.
        lq_nearest = cv2.resize(util.single2uint(img_lq),
                                (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                interpolation=0)
        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                        interpolation=0)
        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
        util.imsave(img_concat, str(idx) + '.png')
3DTopia/ldm/modules/image_degradation/utils/test.png ADDED
3DTopia/ldm/modules/image_degradation/utils_image.py ADDED
@@ -0,0 +1,916 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import math
3
+ import random
4
+ import numpy as np
5
+ import torch
6
+ import cv2
7
+ from torchvision.utils import make_grid
8
+ from datetime import datetime
9
+ #import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
10
+
11
+
12
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
13
+
14
+
15
+ '''
16
+ # --------------------------------------------
17
+ # Kai Zhang (github: https://github.com/cszn)
18
+ # 03/Mar/2019
19
+ # --------------------------------------------
20
+ # https://github.com/twhui/SRGAN-pyTorch
21
+ # https://github.com/xinntao/BasicSR
22
+ # --------------------------------------------
23
+ '''
24
+
25
+
26
+ IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
27
+
28
+
29
def is_image_file(filename):
    """True if filename ends with one of the recognized image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
31
+
32
+
33
def get_timestamp():
    """Current local time formatted as 'yymmdd-HHMMSS'."""
    return datetime.now().strftime('%y%m%d-%H%M%S')
35
+
36
+
37
def imshow(x, title=None, cbar=False, figsize=None):
    """Display x as a grayscale image with matplotlib (optional title / colorbar)."""
    # The module-level matplotlib import is commented out at the top of this
    # file, so `plt` would be undefined; import lazily here to keep the module
    # importable without matplotlib while restoring this helper.
    import matplotlib.pyplot as plt
    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
45
+
46
+
47
def surf(Z, cmap='rainbow', figsize=None):
    """Render Z as a 3-D surface plot with matplotlib."""
    # Lazy import: the module-level matplotlib import is commented out at the
    # top of this file, so `plt` would otherwise be undefined here.
    import matplotlib.pyplot as plt
    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection='3d')

    w, h = Z.shape[:2]
    xx = np.arange(0, w, 1)
    yy = np.arange(0, h, 1)
    X, Y = np.meshgrid(xx, yy)
    ax3.plot_surface(X, Y, Z, cmap=cmap)
    # ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
    plt.show()
58
+
59
+
60
+ '''
61
+ # --------------------------------------------
62
+ # get image pathes
63
+ # --------------------------------------------
64
+ '''
65
+
66
+
67
def get_image_paths(dataroot):
    """Return sorted image paths under dataroot, or None if dataroot is None."""
    if dataroot is None:
        return None
    return sorted(_get_paths_from_images(dataroot))
72
+
73
+
74
def _get_paths_from_images(path):
    """Recursively collect sorted image file paths under *path*."""
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = [os.path.join(dirpath, fname)
              for dirpath, _, fnames in sorted(os.walk(path))
              for fname in sorted(fnames)
              if is_image_file(fname)]
    assert images, '{:s} has no valid image file'.format(path)
    return images
84
+
85
+
86
+ '''
87
+ # --------------------------------------------
88
+ # split large images into small images
89
+ # --------------------------------------------
90
+ '''
91
+
92
+
93
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    """Split img into overlapping p_size x p_size patches when both sides exceed p_max.

    Returns [img] unchanged when the image is small enough.
    """
    w, h = img.shape[:2]  # NOTE(review): names kept from the original; shape[0] is actually the height
    patches = []
    if w > p_max and h > p_max:
        # Use the builtin int: the np.int alias was removed in NumPy >= 1.24.
        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=int))
        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=int))
        # Always include a patch flush with each far edge.
        w1.append(w - p_size)
        h1.append(h - p_size)
        for i in w1:
            for j in h1:
                patches.append(img[i:i + p_size, j:j + p_size, :])
    else:
        patches.append(img)

    return patches
110
+
111
+
112
def imssave(imgs, img_path):
    """Save a list of WxHxC images, appending '_s%04d' to the base file name."""
    base, ext = os.path.splitext(os.path.basename(img_path))
    out_dir = os.path.dirname(img_path)
    for idx, im in enumerate(imgs):
        if im.ndim == 3:
            im = im[:, :, [2, 1, 0]]  # RGB -> BGR for cv2
        new_path = os.path.join(out_dir, base + str('_s{:04d}'.format(idx)) + '.png')
        cv2.imwrite(new_path, im)
123
+
124
+
125
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
    """
    Split the large images from original_dataroot into small overlapped images
    of size (p_size)x(p_size) and save them into taget_dataroot; only images
    larger than (p_max)x(p_max) are split.
    Args:
        original_dataroot:
        taget_dataroot:
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    for img_path in get_image_paths(original_dataroot):
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
145
+
146
+ '''
147
+ # --------------------------------------------
148
+ # makedir
149
+ # --------------------------------------------
150
+ '''
151
+
152
+
153
def mkdir(path):
    """Create path (and any missing parents); no-op if it already exists.

    exist_ok avoids the check-then-create race of the original
    `if not os.path.exists(path): os.makedirs(path)`.
    """
    os.makedirs(path, exist_ok=True)
156
+
157
+
158
def mkdirs(paths):
    """Create one directory (str argument) or each directory in an iterable."""
    if isinstance(paths, str):
        paths = [paths]
    for p in paths:
        mkdir(p)
164
+
165
+
166
def mkdir_and_rename(path):
    """Create path; if it already exists, archive the old one with a timestamp suffix first."""
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
172
+
173
+
174
+ '''
175
+ # --------------------------------------------
176
+ # read image from path
177
+ # opencv is fast, but read BGR numpy image
178
+ # --------------------------------------------
179
+ '''
180
+
181
+
182
+ # --------------------------------------------
183
+ # get uint8 image of size HxWxn_channles (RGB)
184
+ # --------------------------------------------
185
def imread_uint(path, n_channels=3):
    """Read an image as uint8: HxWx3 (RGB, or replicated gray) or HxWx1 (gray)."""
    # input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img
198
+
199
+
200
+ # --------------------------------------------
201
+ # matlab's imwrite
202
+ # --------------------------------------------
203
def imsave(img, img_path):
    """Write img to disk (matlab-style imwrite); flips RGB -> BGR for cv2."""
    arr = np.squeeze(img)
    if arr.ndim == 3:
        arr = arr[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, arr)
208
+
209
def imwrite(img, img_path):
    """Write img to disk; flips RGB -> BGR for cv2.

    NOTE(review): byte-for-byte duplicate of imsave, kept for backward compatibility.
    """
    arr = np.squeeze(img)
    if arr.ndim == 3:
        arr = arr[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, arr)
214
+
215
+
216
+
217
+ # --------------------------------------------
218
+ # get single image of size HxWxn_channles (BGR)
219
+ # --------------------------------------------
220
def read_img(path):
    """Read an image with cv2 as float32 HWC BGR in [0, 1]; gray gets a channel axis, alpha is dropped."""
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = img[:, :, None]
    # some images have 4 channels; keep only the first three (BGR)
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
231
+
232
+
233
+ '''
234
+ # --------------------------------------------
235
+ # image format conversion
236
+ # --------------------------------------------
237
+ # numpy(single) <---> numpy(unit)
238
+ # numpy(single) <---> tensor
239
+ # numpy(unit) <---> tensor
240
+ # --------------------------------------------
241
+ '''
242
+
243
+
244
+ # --------------------------------------------
245
+ # numpy(single) [0, 1] <---> numpy(unit)
246
+ # --------------------------------------------
247
+
248
+
249
def uint2single(img):
    """uint8 [0, 255] -> float32 [0, 1]."""
    return np.float32(img / 255.)
252
+
253
+
254
def single2uint(img):
    """float [0, 1] -> uint8 [0, 255], clipping out-of-range values."""
    clipped = img.clip(0, 1)
    return np.uint8((clipped * 255.).round())
257
+
258
+
259
def uint162single(img):
    """uint16 [0, 65535] -> float32 [0, 1]."""
    return np.float32(img / 65535.)
262
+
263
+
264
def single2uint16(img):
    """float [0, 1] -> uint16 [0, 65535], clipping out-of-range values."""
    clipped = img.clip(0, 1)
    return np.uint16((clipped * 65535.).round())
267
+
268
+
269
+ # --------------------------------------------
270
+ # numpy(unit) (HxWxC or HxW) <---> tensor
271
+ # --------------------------------------------
272
+
273
+
274
+ # convert uint to 4-dimensional torch tensor
275
def uint2tensor4(img):
    """uint8 HxWxC (or HxW) -> float tensor 1xCxHxW in [0, 1]."""
    arr = img if img.ndim != 2 else np.expand_dims(img, axis=2)
    t = torch.from_numpy(np.ascontiguousarray(arr))
    return t.permute(2, 0, 1).float().div(255.).unsqueeze(0)
279
+
280
+
281
+ # convert uint to 3-dimensional torch tensor
282
def uint2tensor3(img):
    """uint8 HxWxC (or HxW) -> float tensor CxHxW in [0, 1]."""
    arr = img if img.ndim != 2 else np.expand_dims(img, axis=2)
    t = torch.from_numpy(np.ascontiguousarray(arr))
    return t.permute(2, 0, 1).float().div(255.)
286
+
287
+
288
+ # convert 2/3/4-dimensional torch tensor to uint
289
def tensor2uint(img):
    """2/3/4-D tensor in [0, 1] -> uint8 numpy array (HWC for 3-D results)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    return np.uint8((arr * 255.0).round())
294
+
295
+
296
+ # --------------------------------------------
297
+ # numpy(single) (HxWxC) <---> tensor
298
+ # --------------------------------------------
299
+
300
+
301
+ # convert single (HxWxC) to 3-dimensional torch tensor
302
def single2tensor3(img):
    """float HxWxC numpy -> CxHxW float tensor."""
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float()
304
+
305
+
306
+ # convert single (HxWxC) to 4-dimensional torch tensor
307
def single2tensor4(img):
    """float HxWxC numpy -> 1xCxHxW float tensor."""
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().unsqueeze(0)
309
+
310
+
311
+ # convert torch tensor to single
312
def tensor2single(img):
    """Tensor -> float numpy array; a CHW result becomes HWC."""
    arr = img.data.squeeze().float().cpu().numpy()
    return np.transpose(arr, (1, 2, 0)) if arr.ndim == 3 else arr
318
+
319
+ # convert torch tensor to single
320
def tensor2single3(img):
    """Tensor -> float numpy array with a trailing channel axis (HWC or HxWx1)."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    elif arr.ndim == 2:
        arr = np.expand_dims(arr, axis=2)  # HxW -> HxWx1
    return arr
327
+
328
+
329
def single2tensor5(img):
    """float HxWxCxT numpy -> 1xCxHxWxT float tensor."""
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1, 3).float().unsqueeze(0)
331
+
332
+
333
def single32tensor5(img):
    """float HxWxC numpy -> 1x1xHxWxC float tensor."""
    t = torch.from_numpy(np.ascontiguousarray(img)).float()
    return t.unsqueeze(0).unsqueeze(0)
335
+
336
+
337
def single42tensor4(img):
    """float HxWxCxT numpy -> CxHxWxT float tensor."""
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1, 3).float()
339
+
340
+
341
+ # from skimage.io import imread, imsave
342
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    t = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    t = (t - min_max[0]) / (min_max[1] - min_max[0])  # normalize to [0, 1]
    n_dim = t.dim()
    if n_dim == 4:
        # Tile the batch into a grid, then reorder to HWC / BGR.
        grid = make_grid(t, nrow=int(math.sqrt(len(t))), normalize=False).numpy()
        img_np = np.transpose(grid[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(t.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = t.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Important. Unlike matlab, numpy.uint8() will NOT round by default.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
367
+
368
+
369
+ '''
370
+ # --------------------------------------------
371
+ # Augmentation, flipe and/or rotate
372
+ # --------------------------------------------
373
+ # The following two are enough.
374
+ # (1) augmet_img: numpy image of WxHxC or WxH
375
+ # (2) augment_img_tensor4: tensor image 1xCxWxH
376
+ # --------------------------------------------
377
+ '''
378
+
379
+
380
def augment_img(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)

    Apply one of 8 flip/rotate augmentations (mode 0-7) to a numpy image.
    '''
    ops = {
        0: lambda x: x,
        1: lambda x: np.flipud(np.rot90(x)),
        2: lambda x: np.flipud(x),
        3: lambda x: np.rot90(x, k=3),
        4: lambda x: np.flipud(np.rot90(x, k=2)),
        5: lambda x: np.rot90(x),
        6: lambda x: np.rot90(x, k=2),
        7: lambda x: np.flipud(np.rot90(x, k=3)),
    }
    op = ops.get(mode)
    # Unknown modes fall through to None, matching the original if/elif chain.
    return op(img) if op is not None else None
399
+
400
+
401
def augment_img_tensor4(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)

    Flip/rotate a 4D tensor (NxCxHxW) over its spatial dims; mode in 0-7.
    '''
    if mode == 0:
        return img
    # Number of 90-degree rotations per mode, plus which modes also flip H.
    rotations = {1: 1, 2: 0, 3: 3, 4: 2, 5: 1, 6: 2, 7: 3}
    flip_modes = {1, 2, 4, 7}
    if mode not in rotations:
        return None  # matches the original fall-through for unknown modes
    out = img.rot90(rotations[mode], [2, 3]) if rotations[mode] else img
    return out.flip([2]) if mode in flip_modes else out
420
+
421
+
422
def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)

    Apply flip/rotate augmentation `mode` (0-7) to a 3D (CxHxW) or 4D
    (NxCxHxW) torch tensor by round-tripping through numpy and augment_img.
    '''
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    # Move channel (and batch) axes last so augment_img sees an HxWx... layout.
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    # Restore the original axis order before returning.
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)

    # type_as also moves the result back to the input's dtype/device.
    return img_tensor.type_as(img)
439
+
440
+
441
def augment_img_np3(img, mode=0):
    """Apply one of 8 flip/transpose augmentations to an HxWxC numpy array."""
    if mode == 0:
        return img
    if mode == 1:
        return img.transpose(1, 0, 2)
    if mode == 2:
        return img[::-1, :, :]
    if mode == 3:
        return img[::-1, :, :].transpose(1, 0, 2)
    if mode == 4:
        return img[:, ::-1, :]
    if mode == 5:
        return img[:, ::-1, :].transpose(1, 0, 2)
    if mode == 6:
        return img[:, ::-1, :][::-1, :, :]
    if mode == 7:
        return img[:, ::-1, :][::-1, :, :].transpose(1, 0, 2)
467
+
468
+
469
def augment_imgs(img_list, hflip=True, rot=True):
    """Randomly flip/rotate a list of HxWxC numpy images (same ops for all)."""
    # Draw the augmentation choices once so every image is transformed alike.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_apply(img) for img in img_list]
485
+
486
+
487
+ '''
488
+ # --------------------------------------------
489
+ # modcrop and shave
490
+ # --------------------------------------------
491
+ '''
492
+
493
+
494
def modcrop(img_in, scale):
    """Crop a numpy image (HWC or HW) so H and W are multiples of `scale`."""
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    height, width = img.shape[0], img.shape[1]
    # Trim the remainder rows/cols; Ellipsis keeps any channel axis intact.
    return img[:height - height % scale, :width - width % scale, ...]
508
+
509
+
510
def shave(img_in, border=0):
    """Trim `border` pixels from each side of a numpy image (HWC or HW)."""
    img = np.copy(img_in)
    height, width = img.shape[:2]
    return img[border:height - border, border:width - border]
516
+
517
+
518
+ '''
519
+ # --------------------------------------------
520
+ # image processing process on numpy image
521
+ # channel_convert(in_c, tar_type, img_list):
522
+ # rgb2ycbcr(img, only_y=True):
523
+ # bgr2ycbcr(img, only_y=True):
524
+ # ycbcr2rgb(img):
525
+ # --------------------------------------------
526
+ '''
527
+
528
+
529
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype/range convention as the input.
    '''
    in_img_type = img.dtype
    # Bug fix: the astype() result was previously discarded (astype returns a
    # copy, it does not convert in place), and `img *= 255.` mutated the
    # caller's float array. Rebind instead of mutating.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img = img * 255.
    # convert (ITU-R BT.601 coefficients, matlab scaling)
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
551
+
552
+
553
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype/range convention as the input.
    '''
    in_img_type = img.dtype
    # Bug fix: the astype() result was previously discarded (astype returns a
    # copy), and `img *= 255.` mutated the caller's float array in place.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img = img * 255.
    # convert (inverse BT.601 transform, matlab scaling)
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
571
+
572
+
573
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype/range convention as the input.
    '''
    in_img_type = img.dtype
    # Bug fix: the astype() result was previously discarded (astype returns a
    # copy), and `img *= 255.` mutated the caller's float array in place.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img = img * 255.
    # convert (BT.601 coefficients with B and R columns swapped for BGR input)
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
595
+
596
+
597
def channel_convert(in_c, tar_type, img_list):
    # conversion among BGR, gray and y
    # in_c: channel count of the inputs; tar_type: 'gray' | 'y' | 'RGB'.
    # Returns a new list; unsupported combinations pass through unchanged.
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        # Re-add a singleton channel axis so downstream code sees HxWx1.
        return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'y':  # BGR to y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list
609
+
610
+
611
+ '''
612
+ # --------------------------------------------
613
+ # metric, PSNR and SSIM
614
+ # --------------------------------------------
615
+ '''
616
+
617
+
618
+ # --------------------------------------------
619
+ # PSNR
620
+ # --------------------------------------------
621
def calculate_psnr(img1, img2, border=0):
    """PSNR in dB between two images with range [0, 255].

    `border` pixels are cropped from each side before comparison; returns
    inf for identical crops.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    crop1 = img1[border:h - border, border:w - border].astype(np.float64)
    crop2 = img2[border:h - border, border:w - border].astype(np.float64)

    mse = np.mean((crop1 - crop2) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))
637
+
638
+
639
+ # --------------------------------------------
640
+ # SSIM
641
+ # --------------------------------------------
642
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]
    border: pixels cropped from each side before evaluation
    Raises ValueError on mismatched shapes or unsupported layouts.
    '''
    #img1 = img1.squeeze()
    #img2 = img2.squeeze()
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # Color: average the per-channel SSIM scores.
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
        # NOTE(review): an HxWxC image with C not in {1, 3} falls through and
        # returns None rather than raising — confirm whether that is intended.
    else:
        raise ValueError('Wrong input image dimensions.')
667
+
668
+
669
def ssim(img1, img2):
    """Single-channel SSIM between two [0, 255] images (MATLAB-compatible).

    Uses an 11x11 Gaussian window (sigma=1.5); a 5-pixel border is excluded
    by the 'valid'-style crop after filtering.
    """
    # Stabilising constants from the SSIM paper, scaled for [0, 255] data.
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # Local (co)variances via E[x^2] - E[x]^2.
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
690
+
691
+
692
+ '''
693
+ # --------------------------------------------
694
+ # matlab's bicubic imresize (numpy and torch) [0, 1]
695
+ # --------------------------------------------
696
+ '''
697
+
698
+
699
+ # matlab 'imresize' function, now only support 'bicubic'
700
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), as used by matlab imresize."""
    ax = torch.abs(x)
    ax2 = ax ** 2
    ax3 = ax ** 3
    near = (1.5 * ax3 - 2.5 * ax2 + 1) * (ax <= 1).type_as(ax)
    far = (-0.5 * ax3 + 2.5 * ax2 - 4 * ax + 2) * ((ax > 1) * (ax <= 2)).type_as(ax)
    return near + far
706
+
707
+
708
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute matlab-imresize interpolation weights and source indices.

    Returns (weights, indices, sym_len_s, sym_len_e): per-output-pixel weight
    rows and input indices, plus how many pixels to mirror-pad at each end.
    NOTE(review): `kernel` is accepted but unused — only the cubic kernel is
    implemented here; confirm before passing anything else.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)

    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Convert out-of-range indices into mirror-padding lengths at both ends.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
761
+
762
+
763
+ # --------------------------------------------
764
+ # imresize for tensor image [0, 1]
765
+ # --------------------------------------------
766
def imresize(img, scale, antialiasing=True):
    """Matlab-compatible bicubic resize for torch tensors in [0, 1].

    input: img: pytorch tensor, CHW or HW [0,1]
    output: CHW or HW [0,1] w/o round
    NOTE(review): a 2D input is unsqueezed with the in-place `unsqueeze_`,
    mutating the caller's tensor shape — confirm callers tolerate this.
    """
    # Now the scale should be the same for H and W
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying (mirror-pad the rows so boundary pixels are usable)
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    # Resample rows: each output row is a weighted sum of kernel_width rows.
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying (mirror-pad the columns)
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    # Resample columns the same way.
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
834
+
835
+
836
+ # --------------------------------------------
837
+ # imresize for numpy image [0, 1]
838
+ # --------------------------------------------
839
def imresize_np(img, scale, antialiasing=True):
    """Matlab-compatible bicubic resize for numpy images in [0, 1].

    input: img: Numpy, HWC or HW [0,1]
    output: HWC or HW [0,1] w/o round
    Same algorithm as imresize(), but channel-last and returning numpy.
    """
    # Now the scale should be the same for H and W
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)

    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying (mirror-pad the rows so boundary pixels are usable)
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    # Resample rows: each output row is a weighted sum of kernel_width rows.
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying (mirror-pad the columns)
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    # Resample columns the same way.
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()

    return out_2.numpy()
910
+
911
+
912
if __name__ == '__main__':
    # Manual smoke test; the commented example shows the intended usage of
    # imresize_np on a uint8 image converted to [0, 1] floats.
    print('---')
    # img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
3DTopia/ldm/modules/losses/__init__.py ADDED
@@ -0,0 +1 @@
 
1
+ from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
3DTopia/ldm/modules/losses/contperceptual.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no?
5
+
6
+
7
class LPIPSWithDiscriminator(nn.Module):
    """Autoencoder loss: pixel L1 + LPIPS perceptual + KL term, plus an
    adversarial term from an NLayer patch discriminator (taming-transformers
    style). `forward` dispatches on optimizer_idx: 0 = generator/AE step,
    1 = discriminator step; both return (loss, log_dict).
    """
    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_loss="hinge"):

        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.kl_weight = kl_weight
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        # output log variance (learned global observation noise)
        self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm
                                                 ).apply(weights_init)
        # Adversarial term is disabled until this global step is reached.
        self.discriminator_iter_start = disc_start
        self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Scale the generator loss so its gradient norm at the last decoder
        layer matches the reconstruction gradient norm."""
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train",
                weights=None):
        """Compute the AE loss (optimizer_idx==0) or the discriminator loss
        (optimizer_idx==1). `weights` optionally reweights the NLL per pixel.
        """
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss

        # Gaussian NLL with a learned global log-variance.
        nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
        weighted_nll_loss = nll_loss
        if weights is not None:
            weighted_nll_loss = weights*nll_loss
        # Sum over all elements, averaged over the batch dimension only.
        weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
        nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        kl_loss = posteriors.kl()
        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]

        # now the GAN part
        if optimizer_idx == 0:
            # generator update
            if cond is None:
                assert not self.disc_conditional
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = -torch.mean(logits_fake)

            if self.disc_factor > 0.0:
                try:
                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
                except RuntimeError:
                    # Gradients unavailable (e.g. eval mode): drop the GAN term.
                    assert not self.training
                    d_weight = torch.tensor(0.0)
            else:
                d_weight = torch.tensor(0.0)

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss

            log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(),
                   "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(),
                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
                   "{}/d_weight".format(split): d_weight.detach(),
                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
                   "{}/g_loss".format(split): g_loss.detach().mean(),
                   }
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            if cond is None:
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
                   "{}/logits_real".format(split): logits_real.detach().mean(),
                   "{}/logits_fake".format(split): logits_fake.detach().mean()
                   }
            return d_loss, log
111
+
3DTopia/ldm/modules/losses/vqperceptual.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+ from einops import repeat
5
+
6
+ from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
7
+ from taming.modules.losses.lpips import LPIPS
8
+ from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss
9
+
10
+
11
def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
    """Hinge discriminator loss with a per-example weight on each sample."""
    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
    real_term = torch.mean(F.relu(1. - logits_real), dim=[1, 2, 3])
    fake_term = torch.mean(F.relu(1. + logits_fake), dim=[1, 2, 3])
    weighted_real = (weights * real_term).sum() / weights.sum()
    weighted_fake = (weights * fake_term).sum() / weights.sum()
    return 0.5 * (weighted_real + weighted_fake)
19
+
20
def adopt_weight(weight, global_step, threshold=0, value=0.):
    """Return `weight` once `global_step` reaches `threshold`, else `value`."""
    return weight if global_step >= threshold else value
24
+
25
+
26
def measure_perplexity(predicted_indices, n_embed):
    """Codebook usage statistics for a batch of predicted VQ indices.

    src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
    When perplexity == n_embed, all clusters are used exactly equally.
    Returns (perplexity, number_of_clusters_used).
    """
    one_hot = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
    avg_probs = one_hot.mean(0)
    perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
    cluster_use = torch.sum(avg_probs > 0)
    return perplexity, cluster_use
34
+
35
def l1(x, y):
    """Elementwise absolute error |x - y|."""
    return torch.abs(x - y)
37
+
38
+
39
def l2(x, y):
    """Elementwise squared error (x - y)^2."""
    diff = x - y
    return diff * diff
41
+
42
+
43
class VQLPIPSWithDiscriminator(nn.Module):
    """VQ autoencoder loss: pixel (l1/l2) + LPIPS perceptual + codebook terms,
    plus an adversarial term from an NLayer patch discriminator.

    `forward` dispatches on optimizer_idx: 0 = generator/AE step,
    1 = discriminator step; both return (loss, log_dict).
    """
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips",
                 pixel_loss="l1"):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        assert perceptual_loss in ["lpips", "clips", "dists"]
        assert pixel_loss in ["l1", "l2"]
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        if perceptual_loss == "lpips":
            print(f"{self.__class__.__name__}: Running with LPIPS.")
            self.perceptual_loss = LPIPS().eval()
        else:
            raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<")
        self.perceptual_weight = perceptual_weight

        if pixel_loss == "l1":
            self.pixel_loss = l1
        else:
            self.pixel_loss = l2

        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        # Adversarial term is disabled until this global step is reached.
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
        self.n_classes = n_classes

    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Scale the generator loss so its gradient norm at the last decoder
        layer matches the reconstruction gradient norm."""
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]

        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight

    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train", predicted_indices=None):
        # Bug fix: this used `not exists(codebook_loss)`, but `exists` is
        # neither defined nor imported in this module, so a missing codebook
        # loss raised NameError at runtime instead of defaulting to zero.
        if codebook_loss is None:
            codebook_loss = torch.tensor([0.]).to(inputs.device)
        #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss
        else:
            p_loss = torch.tensor([0.0])

        nll_loss = rec_loss
        #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        nll_loss = torch.mean(nll_loss)

        # now the GAN part
        if optimizer_idx == 0:
            # generator update
            if cond is None:
                assert not self.disc_conditional
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = -torch.mean(logits_fake)

            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # Gradients unavailable (e.g. eval mode): drop the GAN term.
                assert not self.training
                d_weight = torch.tensor(0.0)

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()

            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
                   "{}/p_loss".format(split): p_loss.detach().mean(),
                   "{}/d_weight".format(split): d_weight.detach(),
                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
                   "{}/g_loss".format(split): g_loss.detach().mean(),
                   }
            if predicted_indices is not None:
                assert self.n_classes is not None
                with torch.no_grad():
                    perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes)
                log[f"{split}/perplexity"] = perplexity
                log[f"{split}/cluster_usage"] = cluster_usage
            return loss, log

        if optimizer_idx == 1:
            # second pass for discriminator update
            if cond is None:
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))

            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)

            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
                   "{}/logits_real".format(split): logits_real.detach().mean(),
                   "{}/logits_fake".format(split): logits_fake.detach().mean()
                   }
            return d_loss, log
3DTopia/ldm/modules/x_transformer.py ADDED
@@ -0,0 +1,641 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
2
+ import torch
3
+ from torch import nn, einsum
4
+ import torch.nn.functional as F
5
+ from functools import partial
6
+ from inspect import isfunction
7
+ from collections import namedtuple
8
+ from einops import rearrange, repeat, reduce
9
+
10
+ # constants
11
+
12
# constants

# Default per-head dimensionality used by Attention when none is supplied.
DEFAULT_DIM_HEAD = 64

# Per-attention-call tensors kept for introspection (residual attention, attn maps).
Intermediates = namedtuple('Intermediates', [
    'pre_softmax_attn',
    'post_softmax_attn'
])

# Per-forward-pass aggregates returned by AttentionLayers when return_hiddens=True.
# Fix: the typename previously duplicated 'Intermediates', which made repr() and
# pickling of LayerIntermediates report/resolve the wrong type name.
LayerIntermediates = namedtuple('LayerIntermediates', [
    'hiddens',
    'attn_intermediates'
])
23
+
24
+
25
class AbsolutePositionalEmbedding(nn.Module):
    """Learned absolute positional embedding, returned with shape (1, n, dim)."""

    def __init__(self, dim, max_seq_len):
        super().__init__()
        self.emb = nn.Embedding(max_seq_len, dim)
        self.init_()

    def init_(self):
        # small-std init, matching typical transformer embedding initialization
        nn.init.normal_(self.emb.weight, std=0.02)

    def forward(self, x):
        positions = torch.arange(x.shape[1], device=x.device)
        return self.emb(positions).unsqueeze(0)
37
+
38
+
39
class FixedPositionalEmbedding(nn.Module):
    """Sinusoidal (non-learned) positional embedding, shape (1, n, dim)."""

    def __init__(self, dim):
        super().__init__()
        # standard transformer inverse frequencies: 1 / 10000^(2i/dim)
        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, x, seq_dim=1, offset=0):
        positions = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
        angles = torch.einsum('i , j -> i j', positions, self.inv_freq)
        embedding = torch.cat((angles.sin(), angles.cos()), dim=-1)
        return embedding.unsqueeze(0)
50
+
51
+
52
+ # helpers
53
+
54
# helpers

def exists(val):
    """True when *val* is not None."""
    return not (val is None)


def default(val, d):
    """Return *val* when present, else the fallback *d* (calling it if it is a function)."""
    if not exists(val):
        return d() if isfunction(d) else d
    return val


def always(val):
    """Build a function that ignores all arguments and always returns *val*."""
    def inner(*args, **kwargs):
        return val
    return inner


def not_equals(val):
    """Predicate factory: true for inputs different from *val*."""
    def inner(x):
        return x != val
    return inner


def equals(val):
    """Predicate factory: true for inputs equal to *val*."""
    def inner(x):
        return x == val
    return inner


def max_neg_value(tensor):
    """Most negative finite value representable in *tensor*'s dtype (used for masking)."""
    return -torch.finfo(tensor.dtype).max
84
+
85
+
86
+ # keyword argument helpers
87
+
88
# keyword argument helpers

def pick_and_pop(keys, d):
    """Remove *keys* from dict *d* (mutating it) and return them as a new dict."""
    return {key: d.pop(key) for key in keys}


def group_dict_by_key(cond, d):
    """Split *d* into (matching, non-matching) dicts according to predicate *cond*."""
    matching, rest = dict(), dict()
    for key in d.keys():
        target = matching if bool(cond(key)) else rest
        target[key] = d[key]
    return matching, rest


def string_begins_with(prefix, str):
    """True when *str* starts with *prefix*."""
    return str.startswith(prefix)


def group_by_key_prefix(prefix, d):
    """Split *d* into (keys starting with *prefix*, all others)."""
    return group_dict_by_key(partial(string_begins_with, prefix), d)


def groupby_prefix_and_trim(prefix, d):
    """Split *d* by *prefix* and strip the prefix from the matching keys."""
    with_prefix, without_prefix = group_dict_by_key(partial(string_begins_with, prefix), d)
    trimmed = {key[len(prefix):]: value for key, value in with_prefix.items()}
    return trimmed, without_prefix
114
+
115
+
116
+ # classes
117
class Scale(nn.Module):
    """Wrap *fn* and multiply its first output by a constant *value*,
    passing any extra outputs through unchanged."""

    def __init__(self, value, fn):
        super().__init__()
        self.value = value
        self.fn = fn

    def forward(self, x, **kwargs):
        out, *extras = self.fn(x, **kwargs)
        return (out * self.value, *extras)
126
+
127
+
128
class Rezero(nn.Module):
    """ReZero wrapper: scales *fn*'s first output by a learned gain initialized to 0,
    so the branch starts as an identity-bypass."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.g = nn.Parameter(torch.zeros(1))

    def forward(self, x, **kwargs):
        out, *extras = self.fn(x, **kwargs)
        return (out * self.g, *extras)
137
+
138
+
139
class ScaleNorm(nn.Module):
    """L2 normalization over the last dim with a single learned scalar gain,
    scaled by dim^-0.5 (eps-clamped for stability)."""

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1))

    def forward(self, x):
        scaled_norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
        return x / scaled_norm.clamp(min=self.eps) * self.g
149
+
150
+
151
class RMSNorm(nn.Module):
    """RMS normalization over the last dim with a per-channel learned gain."""

    def __init__(self, dim, eps=1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        rms = torch.norm(x, dim=-1, keepdim=True) * self.scale
        return x / rms.clamp(min=self.eps) * self.g
161
+
162
+
163
class Residual(nn.Module):
    """Plain additive residual connection."""

    def forward(self, x, residual):
        return residual + x
166
+
167
+
168
class GRUGating(nn.Module):
    """Gated residual connection: fuses the branch output and the residual
    stream with a GRU cell applied position-wise."""

    def __init__(self, dim):
        super().__init__()
        self.gru = nn.GRUCell(dim, dim)

    def forward(self, x, residual):
        # flatten (b, n, d) -> (b*n, d) for the cell, then restore the shape
        flat_x = x.reshape(-1, x.shape[-1])
        flat_residual = residual.reshape(-1, residual.shape[-1])
        return self.gru(flat_x, flat_residual).reshape_as(x)
180
+
181
+
182
+ # feedforward
183
+
184
class GEGLU(nn.Module):
    """GEGLU activation: project to 2x width, gate one half with GELU of the other."""

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        value, gate = self.proj(x).chunk(2, dim=-1)
        return value * F.gelu(gate)
192
+
193
+
194
class FeedForward(nn.Module):
    """Position-wise feed-forward block: expand by *mult*, activate (GELU or
    GEGLU), dropout, then project to *dim_out* (defaults to *dim*)."""

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
        super().__init__()
        inner_dim = int(dim * mult)
        out_dim = dim if dim_out is None else dim_out
        if glu:
            project_in = GEGLU(dim, inner_dim)
        else:
            project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, out_dim),
        )

    def forward(self, x):
        return self.net(x)
212
+
213
+
214
+ # attention.
215
class Attention(nn.Module):
    """Multi-head self- or cross-attention with optional talking heads,
    top-k score sparsification, learned memory key/values, and an
    "attention on attention" (GLU) output projection."""

    def __init__(
        self,
        dim,
        dim_head=DEFAULT_DIM_HEAD,
        heads=8,
        causal=False,
        mask=None,
        talking_heads=False,
        sparse_topk=None,
        use_entmax15=False,
        num_mem_kv=0,
        dropout=0.,
        on_attn=False
    ):
        super().__init__()
        if use_entmax15:
            raise NotImplementedError("Check out entmax activation instead of softmax activation!")
        self.scale = dim_head ** -0.5  # 1/sqrt(d_head) attention scaling
        self.heads = heads
        self.causal = causal
        self.mask = mask

        inner_dim = dim_head * heads

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        self.dropout = nn.Dropout(dropout)

        # talking heads: learned mixing across heads before/after softmax
        self.talking_heads = talking_heads
        if talking_heads:
            self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
            self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))

        # explicit topk sparse attention
        self.sparse_topk = sparse_topk

        # entmax
        #self.attn_fn = entmax15 if use_entmax15 else F.softmax
        self.attn_fn = F.softmax

        # add memory key / values
        self.num_mem_kv = num_mem_kv
        if num_mem_kv > 0:
            self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
            self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))

        # attention on attention
        self.attn_on_attn = on_attn
        self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)

    def forward(
        self,
        x,
        context=None,
        mask=None,
        context_mask=None,
        rel_pos=None,
        sinusoidal_emb=None,
        prev_attn=None,
        mem=None
    ):
        """Return (output, Intermediates).

        Cross-attends to *context* when given, otherwise self-attends over *x*.
        *mem* prepends cached key/value inputs (shortformer-style); *prev_attn*
        adds the previous layer's pre-softmax scores (residual attention).
        """
        b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
        kv_input = default(context, x)

        q_input = x
        k_input = kv_input
        v_input = kv_input

        if exists(mem):
            k_input = torch.cat((mem, k_input), dim=-2)
            v_input = torch.cat((mem, v_input), dim=-2)

        if exists(sinusoidal_emb):
            # in shortformer, the query would start at a position offset depending on the past cached memory
            offset = k_input.shape[-2] - q_input.shape[-2]
            q_input = q_input + sinusoidal_emb(q_input, offset=offset)
            k_input = k_input + sinusoidal_emb(k_input)

        q = self.to_q(q_input)
        k = self.to_k(k_input)
        v = self.to_v(v_input)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))

        # combine query/key padding masks into a (b, 1, i, j) boolean mask
        input_mask = None
        if any(map(exists, (mask, context_mask))):
            q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
            k_mask = q_mask if not exists(context) else context_mask
            k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
            q_mask = rearrange(q_mask, 'b i -> b () i ()')
            k_mask = rearrange(k_mask, 'b j -> b () () j')
            input_mask = q_mask * k_mask

        if self.num_mem_kv > 0:
            mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
            k = torch.cat((mem_k, k), dim=-2)
            v = torch.cat((mem_v, v), dim=-2)
            if exists(input_mask):
                # memory key/values are always attendable
                input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)

        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        mask_value = max_neg_value(dots)

        if exists(prev_attn):
            dots = dots + prev_attn

        pre_softmax_attn = dots

        if talking_heads:
            dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()

        if exists(rel_pos):
            dots = rel_pos(dots)

        if exists(input_mask):
            dots.masked_fill_(~input_mask, mask_value)
            del input_mask

        if self.causal:
            # mask out j > i so each position only attends to the past
            i, j = dots.shape[-2:]
            r = torch.arange(i, device=device)
            mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
            mask = F.pad(mask, (j - i, 0), value=False)
            dots.masked_fill_(mask, mask_value)
            del mask

        if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
            # keep only the top-k scores per query, mask the rest to -inf
            top, _ = dots.topk(self.sparse_topk, dim=-1)
            vk = top[..., -1].unsqueeze(-1).expand_as(dots)
            mask = dots < vk
            dots.masked_fill_(mask, mask_value)
            del mask

        attn = self.attn_fn(dots, dim=-1)
        post_softmax_attn = attn

        attn = self.dropout(attn)

        if talking_heads:
            attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')

        intermediates = Intermediates(
            pre_softmax_attn=pre_softmax_attn,
            post_softmax_attn=post_softmax_attn
        )

        return self.to_out(out), intermediates
368
+
369
+
370
class AttentionLayers(nn.Module):
    """Stack of self-attention ('a'), cross-attention ('c') and feed-forward
    ('f') layers, assembled from a tuple of layer-type codes. Supports custom,
    sandwich, PAR and macaron layouts, pre/post-norm, ReZero, residual
    attention and GRU-gated residuals."""

    def __init__(
        self,
        dim,
        depth,
        heads=8,
        causal=False,
        cross_attend=False,
        only_cross=False,
        use_scalenorm=False,
        use_rmsnorm=False,
        use_rezero=False,
        rel_pos_num_buckets=32,
        rel_pos_max_distance=128,
        position_infused_attn=False,
        custom_layers=None,
        sandwich_coef=None,
        par_ratio=None,
        residual_attn=False,
        cross_residual_attn=False,
        macaron=False,
        pre_norm=True,
        gate_residual=False,
        **kwargs
    ):
        super().__init__()
        # route 'ff_'-prefixed kwargs to FeedForward, 'attn_'-prefixed to Attention
        ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
        attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)

        dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)

        self.dim = dim
        self.depth = depth
        self.layers = nn.ModuleList([])

        self.has_pos_emb = position_infused_attn
        self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
        self.rotary_pos_emb = always(None)

        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
        # relative position bias is disabled in this trimmed-down variant
        self.rel_pos = None

        self.pre_norm = pre_norm

        self.residual_attn = residual_attn
        self.cross_residual_attn = cross_residual_attn

        norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
        norm_class = RMSNorm if use_rmsnorm else norm_class
        norm_fn = partial(norm_class, dim)

        # ReZero replaces normalization with a zero-initialized learned branch gain
        norm_fn = nn.Identity if use_rezero else norm_fn
        branch_fn = Rezero if use_rezero else None

        if cross_attend and not only_cross:
            default_block = ('a', 'c', 'f')
        elif cross_attend and only_cross:
            default_block = ('c', 'f')
        else:
            default_block = ('a', 'f')

        if macaron:
            # macaron layout: an extra (half-scaled) feed-forward before attention
            default_block = ('f',) + default_block

        if exists(custom_layers):
            layer_types = custom_layers
        elif exists(par_ratio):
            par_depth = depth * len(default_block)
            assert 1 < par_ratio <= par_depth, 'par ratio out of range'
            default_block = tuple(filter(not_equals('f'), default_block))
            par_attn = par_depth // par_ratio
            depth_cut = par_depth * 2 // 3  # 2 / 3 attention layer cutoff suggested by PAR paper
            par_width = (depth_cut + depth_cut // par_attn) // par_attn
            assert len(default_block) <= par_width, 'default block is too large for par_ratio'
            par_block = default_block + ('f',) * (par_width - len(default_block))
            par_head = par_block * par_attn
            layer_types = par_head + ('f',) * (par_depth - len(par_head))
        elif exists(sandwich_coef):
            # sandwich layout: attention-only head, mixed middle, feed-forward-only tail
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
            layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
        else:
            layer_types = default_block * depth

        self.layer_types = layer_types
        self.num_attn_layers = len(list(filter(equals('a'), layer_types)))

        # each entry is a (norm, layer, residual) triple executed in forward()
        for layer_type in self.layer_types:
            if layer_type == 'a':
                layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
            elif layer_type == 'c':
                layer = Attention(dim, heads=heads, **attn_kwargs)
            elif layer_type == 'f':
                layer = FeedForward(dim, **ff_kwargs)
                layer = layer if not macaron else Scale(0.5, layer)
            else:
                raise Exception(f'invalid layer type {layer_type}')

            if isinstance(layer, Attention) and exists(branch_fn):
                layer = branch_fn(layer)

            if gate_residual:
                residual_fn = GRUGating(dim)
            else:
                residual_fn = Residual()

            self.layers.append(nn.ModuleList([
                norm_fn(),
                layer,
                residual_fn
            ]))

    def forward(
        self,
        x,
        context=None,
        mask=None,
        context_mask=None,
        mems=None,
        return_hiddens=False
    ):
        """Run the layer stack over x.

        mems: optional list of cached inputs, one per self-attention layer.
        return_hiddens: also return a LayerIntermediates with per-'a'-layer
        hidden states and all attention Intermediates.
        """
        hiddens = []
        intermediates = []
        prev_attn = None
        prev_cross_attn = None

        # one cached memory slot per self-attention layer, consumed in order
        mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers

        for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
            is_last = ind == (len(self.layers) - 1)

            if layer_type == 'a':
                hiddens.append(x)
                layer_mem = mems.pop(0)

            residual = x

            if self.pre_norm:
                x = norm(x)

            if layer_type == 'a':
                out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
                                   prev_attn=prev_attn, mem=layer_mem)
            elif layer_type == 'c':
                out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
            elif layer_type == 'f':
                out = block(x)

            x = residual_fn(out, residual)

            if layer_type in ('a', 'c'):
                intermediates.append(inter)

            # residual attention: feed pre-softmax scores to the next same-type layer
            if layer_type == 'a' and self.residual_attn:
                prev_attn = inter.pre_softmax_attn
            elif layer_type == 'c' and self.cross_residual_attn:
                prev_cross_attn = inter.pre_softmax_attn

            if not self.pre_norm and not is_last:
                x = norm(x)

        if return_hiddens:
            intermediates = LayerIntermediates(
                hiddens=hiddens,
                attn_intermediates=intermediates
            )

            return x, intermediates

        return x
539
+
540
+
541
class Encoder(AttentionLayers):
    """Non-causal AttentionLayers stack (bidirectional self-attention)."""
    def __init__(self, **kwargs):
        assert 'causal' not in kwargs, 'cannot set causality on encoder'
        super().__init__(causal=False, **kwargs)
545
+
546
+
547
+
548
class TransformerWrapper(nn.Module):
    """Token embedding + positional embedding + AttentionLayers + output head,
    with optional prepended memory tokens and transformer-XL-style cached
    memories."""

    def __init__(
        self,
        *,
        num_tokens,
        max_seq_len,
        attn_layers,
        emb_dim=None,
        max_mem_len=0.,
        emb_dropout=0.,
        num_memory_tokens=None,
        tie_embedding=False,
        use_pos_emb=True
    ):
        super().__init__()
        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'

        dim = attn_layers.dim
        emb_dim = default(emb_dim, dim)

        self.max_seq_len = max_seq_len
        self.max_mem_len = max_mem_len
        self.num_tokens = num_tokens

        self.token_emb = nn.Embedding(num_tokens, emb_dim)
        # skip absolute positions when the attention layers provide their own
        self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
            use_pos_emb and not attn_layers.has_pos_emb) else always(0)
        self.emb_dropout = nn.Dropout(emb_dropout)

        self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
        self.attn_layers = attn_layers
        self.norm = nn.LayerNorm(dim)

        self.init_()

        # tie_embedding reuses the token embedding as the output projection
        self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()

        # memory tokens (like [cls]) from Memory Transformers paper
        num_memory_tokens = default(num_memory_tokens, 0)
        self.num_memory_tokens = num_memory_tokens
        if num_memory_tokens > 0:
            self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))

            # let funnel encoder know number of memory tokens, if specified
            if hasattr(attn_layers, 'num_memory_tokens'):
                attn_layers.num_memory_tokens = num_memory_tokens

    def init_(self):
        # small-std init for the token embedding table
        nn.init.normal_(self.token_emb.weight, std=0.02)

    def forward(
        self,
        x,
        return_embeddings=False,
        mask=None,
        return_mems=False,
        return_attn=False,
        mems=None,
        **kwargs
    ):
        """Map token ids (b, n) to logits (b, n, num_tokens) — or embeddings
        when return_embeddings is set.

        return_mems: additionally return per-layer hidden states truncated to
        max_mem_len (concatenated to incoming *mems* when given).
        return_attn: additionally return post-softmax attention maps.
        """
        b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
        x = self.token_emb(x)
        x += self.pos_emb(x)
        x = self.emb_dropout(x)

        x = self.project_emb(x)

        if num_mem > 0:
            mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
            x = torch.cat((mem, x), dim=1)

            # auto-handle masking after appending memory tokens
            if exists(mask):
                mask = F.pad(mask, (num_mem, 0), value=True)

        x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
        x = self.norm(x)

        # split the prepended memory tokens back off the sequence
        mem, x = x[:, :num_mem], x[:, num_mem:]

        out = self.to_logits(x) if not return_embeddings else x

        if return_mems:
            hiddens = intermediates.hiddens
            new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
            return out, new_mems

        if return_attn:
            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
            return out, attn_maps

        return out
641
+
3DTopia/ldm/util.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+
3
+ import torch
4
+ import numpy as np
5
+ from collections import abc
6
+ from einops import rearrange
7
+ from functools import partial
8
+
9
+ import multiprocessing as mp
10
+ from threading import Thread
11
+ from queue import Queue
12
+
13
+ from inspect import isfunction
14
+ from PIL import Image, ImageDraw, ImageFont
15
+
16
+
17
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of caption strings as images for logging.

    wh: (width, height) of each rendered image.
    xc: list of caption strings, one per batch element.
    size: font size for the bundled DejaVuSans font.
    Returns a (b, 3, h, w) float tensor scaled to [-1, 1].
    """
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
        # wrap at ~40 characters per line for 256px width, scaled to actual width
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Cant encode string for logging. Skipping.")

        # HWC uint8 -> CHW float in [-1, 1]
        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
39
+
40
+
41
def ismap(x):
    """True for 4-d tensors whose channel dim (dim 1) exceeds 3 — i.e. feature maps rather than images."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] > 3
45
+
46
+
47
def isimage(x):
    """True for 4-d tensors with 1 or 3 channels (grayscale or RGB image batches)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and (x.shape[1] == 3 or x.shape[1] == 1)
51
+
52
+
53
def exists(x):
    """True when *x* is not None."""
    return not (x is None)


def default(val, d):
    """Return *val* when it is not None, else the fallback *d* (calling it if it is a function)."""
    if val is None:
        return d() if isfunction(d) else d
    return val
61
+
62
+
63
def mean_flat(tensor):
    """Take the mean over all non-batch dimensions.

    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    """
    non_batch_dims = list(range(1, tensor.ndim))
    return tensor.mean(dim=non_batch_dims)
69
+
70
+
71
def count_params(model, verbose=False):
    """Return the total parameter count of *model*; optionally print it in millions."""
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
    return total_params
76
+
77
+
78
def instantiate_from_config(config):
    """Build the object described by *config*.

    Expects a dict-like with a "target" dotted import path and an optional
    "params" dict passed as keyword arguments. The sentinel strings
    '__is_first_stage__' and '__is_unconditional__' map to None.

    Raises:
        KeyError: when "target" is missing and config is not a sentinel.
    """
    # idiom fix: `"target" not in config` instead of `not "target" in config`
    if "target" not in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
86
+
87
+
88
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path 'pkg.mod.Name' to the named attribute, importing the module.

    reload: re-import the module first (useful during interactive development).
    """
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
94
+
95
+
96
+ def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
97
+ # create dummy dataset instance
98
+
99
+ # run prefetching
100
+ if idx_to_fn:
101
+ res = func(data, worker_id=idx)
102
+ else:
103
+ res = func(data)
104
+ Q.put([idx, res])
105
+ Q.put("Done")
106
+
107
+
108
def parallel_data_prefetch(
        func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False
):
    """Apply *func* to *data* split across *n_proc* parallel workers.

    Each worker pushes [idx, result] plus a "Done" sentinel onto a shared
    queue; results are reassembled in original chunk order.

    cpu_intensive: use processes (true parallelism) instead of threads.
    target_data_type: 'ndarray' concatenates chunk results, 'list' flattens them.
    use_worker_id: forward worker_id=<idx> to *func*.
    """
    # if target_data_type not in ["ndarray", "list"]:
    #     raise ValueError(
    #         "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
    #     )
    if isinstance(data, np.ndarray) and target_data_type == "list":
        raise ValueError("list expected but function got ndarray.")
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(
                f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
            )
            data = list(data.values())
        if target_data_type == "ndarray":
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(
            f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
        )

    if cpu_intensive:
        # processes sidestep the GIL for CPU-bound work
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # spawn processes
    if target_data_type == "ndarray":
        arguments = [
            [func, Q, part, i, use_worker_id]
            for i, part in enumerate(np.array_split(data, n_proc))
        ]
    else:
        # ceil-divide so every element lands in exactly one chunk
        step = (
            int(len(data) / n_proc + 1)
            if len(data) % n_proc != 0
            else int(len(data) / n_proc)
        )
        arguments = [
            [func, Q, part, i, use_worker_id]
            for i, part in enumerate(
                [data[i: i + step] for i in range(0, len(data), step)]
            )
        ]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]

    # start processes
    print(f"Start prefetching...")
    import time

    start = time.time()
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()

        # count "Done" sentinels: one per worker signals completion
        k = 0
        while k < n_proc:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]

    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()

        raise e
    finally:
        for p in processes:
            p.join()
        print(f"Prefetching complete. [{time.time() - start} sec.]")

    if target_data_type == 'ndarray':
        if not isinstance(gather_res[0], np.ndarray):
            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)

        # order outputs
        return np.concatenate(gather_res, axis=0)
    elif target_data_type == 'list':
        out = []
        for r in gather_res:
            out.extend(r)
        return out
    else:
        return gather_res
3DTopia/model/auto_regressive.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import imageio
2
+ import os, math
3
+ import wandb
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import pytorch_lightning as pl
7
+
8
+ from utility.initialize import instantiate_from_config
9
+ from taming.modules.util import SOSProvider
10
+ from utility.triplane_renderer.renderer import get_embedder, NeRF, run_network, render_path1, to8b, img2mse, mse2psnr
11
+ import numpy as np
12
+
13
+ from tqdm import tqdm
14
+
15
+
16
def disabled_train(self, mode=True):
    """Replacement for Module.train that ignores *mode*, freezing a model's
    train/eval state once it has been set."""
    return self
20
+
21
+
22
+ class Net2NetTransformer(pl.LightningModule):
23
+ def __init__(self,
24
+ transformer_config,
25
+ first_stage_config,
26
+ cond_stage_config,
27
+ permuter_config=None,
28
+ ckpt_path=None,
29
+ ignore_keys=[],
30
+ first_stage_key="triplane",
31
+ cond_stage_key="depth",
32
+ downsample_cond_size=-1,
33
+ pkeep=1.0,
34
+ sos_token=0,
35
+ unconditional=True,
36
+ learning_rate=1e-4,
37
+ ):
38
+ super().__init__()
39
+ self.be_unconditional = unconditional
40
+ self.sos_token = sos_token
41
+ self.first_stage_key = first_stage_key
42
+ # self.cond_stage_key = cond_stage_key
43
+ self.init_first_stage_from_ckpt(first_stage_config)
44
+ # self.init_cond_stage_from_ckpt(cond_stage_config)
45
+ if permuter_config is None:
46
+ permuter_config = {"target": "taming.modules.transformer.permuter.Identity"}
47
+ self.permuter = instantiate_from_config(config=permuter_config)
48
+ self.transformer = instantiate_from_config(config=transformer_config)
49
+
50
+ if ckpt_path is not None:
51
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
52
+ self.downsample_cond_size = downsample_cond_size
53
+ self.pkeep = pkeep
54
+ self.learning_rate = learning_rate
55
+
56
+ def init_from_ckpt(self, path, ignore_keys=list()):
57
+ sd = torch.load(path, map_location="cpu")["state_dict"]
58
+ for k in sd.keys():
59
+ for ik in ignore_keys:
60
+ if k.startswith(ik):
61
+ self.print("Deleting key {} from state_dict.".format(k))
62
+ del sd[k]
63
+ self.load_state_dict(sd, strict=False)
64
+ print(f"Restored from {path}")
65
+
66
    def init_first_stage_from_ckpt(self, config):
        """Instantiate the first-stage model from *config* and freeze it.

        All first-stage parameters get requires_grad=False, and its vector
        quantizer is flagged out of training so codebook entries stop updating
        (only the transformer is trained).
        """
        model = instantiate_from_config(config)
        # model = model.eval()
        # model.train = disabled_train

        self.first_stage_model = model

        # freeze every first-stage weight
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

        self.first_stage_model.vector_quantizer.training = False
        self.first_stage_model.vector_quantizer.embedding.update = False
78
+
79
    def init_cond_stage_from_ckpt(self, config):
        """Set up the conditioning-stage model.

        Sentinels: '__is_first_stage__' reuses the first-stage model;
        '__is_unconditional__' (or the unconditional flag) installs an
        SOSProvider that only emits the start-of-sequence token. Otherwise the
        configured model is instantiated, put in eval mode and frozen there.
        """
        if config == "__is_first_stage__":
            print("Using first stage also as cond stage.")
            self.cond_stage_model = self.first_stage_model
        elif config == "__is_unconditional__" or self.be_unconditional:
            print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
                  f"Prepending {self.sos_token} as a sos token.")
            self.be_unconditional = True
            self.cond_stage_key = self.first_stage_key
            self.cond_stage_model = SOSProvider(self.sos_token)
        else:
            model = instantiate_from_config(config)
            model = model.eval()
            # pin eval mode: later .train() calls become no-ops
            model.train = disabled_train
            self.cond_stage_model = model
94
+
95
    def forward(self, x, c):
        """Compute next-token logits and targets for training.

        x: first-stage input; encode_to_z presumably maps it to discrete code
        indices (defined elsewhere in this class — confirm there). c is unused
        here: training is unconditional, a fixed SOS index is prepended instead.
        Returns (logits, target) where target are the un-corrupted code indices.
        """
        # one step to produce the logits
        _, z_indices = self.encode_to_z(x)
        # _, c_indices = self.encode_to_c(c)

        if self.training and self.pkeep < 1.0:
            # randomly replace a (1 - pkeep) fraction of indices with uniform noise
            mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape,
                                                         device=z_indices.device))
            mask = mask.round().to(dtype=torch.int64)
            r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
            a_indices = mask*z_indices+(1-mask)*r_indices
        else:
            a_indices = z_indices

        # use the last vocabulary index as the SOS conditioning token
        c_indices = torch.zeros_like(z_indices[:, 0:1]) + self.transformer.config.vocab_size - 1
        cz_indices = torch.cat((c_indices, a_indices), dim=1)

        # target includes all sequence elements (no need to handle first one
        # differently because we are conditioning)
        target = z_indices
        # make the prediction
        logits, _ = self.transformer(cz_indices[:, :-1])
        # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
        # logits = logits[:, c_indices.shape[1]-1:]

        return logits, target
121
+
122
+ def top_k_logits(self, logits, k):
123
+ v, ix = torch.topk(logits, k)
124
+ out = logits.clone()
125
+ out[out < v[..., [-1]]] = -float('Inf')
126
+ return out
127
+
128
    @torch.no_grad()
    def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
               callback=lambda k: None):
        """Autoregressively extend sequence `x` by `steps` tokens, conditioned on `c`.

        Args:
            x: (B, T) already-generated indices (may be empty along T).
            c: (B, Tc) conditioning indices, prepended to x.
            steps: number of new tokens to generate.
            temperature: logit scaling before softmax.
            sample: multinomial sampling if True, else greedy argmax.
            top_k: optional top-k truncation of the logits.
            callback: called with the step index each iteration.

        Returns the generated indices with the conditioning prefix removed.
        """
        x = torch.cat((c,x),dim=1)
        block_size = self.transformer.get_block_size()
        assert not self.transformer.training
        if self.pkeep <= 0.0:
            # Fully-corrupted training regime: a single forward pass
            # suffices since input is pure noise anyway.
            assert len(x.shape)==2
            noise_shape = (x.shape[0], steps-1)
            #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
            noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
            x = torch.cat((x,noise),dim=1)
            logits, _ = self.transformer(x)
            # take all logits for now and scale by temp
            logits = logits / temperature
            # optionally crop probabilities to only the top k options
            if top_k is not None:
                logits = self.top_k_logits(logits, top_k)
            # apply softmax to convert to probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution or take the most likely
            if sample:
                shape = probs.shape
                probs = probs.reshape(shape[0]*shape[1],shape[2])
                ix = torch.multinomial(probs, num_samples=1)
                probs = probs.reshape(shape[0],shape[1],shape[2])
                ix = ix.reshape(shape[0],shape[1])
            else:
                _, ix = torch.topk(probs, k=1, dim=-1)
            # cut off conditioning
            x = ix[:, c.shape[1]-1:]
        else:
            # Standard token-by-token autoregressive decoding.
            for k in tqdm(range(steps)):
                callback(k)
                assert x.size(1) <= block_size # make sure model can see conditioning
                x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
                logits, _ = self.transformer(x_cond)
                # pluck the logits at the final step and scale by temperature
                logits = logits[:, -1, :] / temperature
                # optionally crop probabilities to only the top k options
                if top_k is not None:
                    logits = self.top_k_logits(logits, top_k)
                # apply softmax to convert to probabilities
                probs = F.softmax(logits, dim=-1)
                # sample from the distribution or take the most likely
                if sample:
                    ix = torch.multinomial(probs, num_samples=1)
                else:
                    _, ix = torch.topk(probs, k=1, dim=-1)
                # append to the sequence and continue
                x = torch.cat((x, ix), dim=1)
            # cut off conditioning
            x = x[:, c.shape[1]:]
        return x
183
+
184
    @torch.no_grad()
    def encode_to_z(self, x):
        """Quantize a triplane `x` to codebook indices (rolled-out layout).

        Returns (quant_z, indices): the quantized latent map and its flat
        per-sample index sequence, optionally reordered by self.permuter.
        """
        quant_z, _, perplexity, encoding_indices = self.first_stage_model.encode(x, rollout=True)
        indices = encoding_indices.view(quant_z.shape[0], -1)
        indices = self.permuter(indices)
        return quant_z, indices
190
+
191
    @torch.no_grad()
    def encode_to_c(self, c):
        """Encode conditioning `c` with the cond-stage model.

        NOTE(review): unused in the current unconditional pipeline (the
        calls to it in forward/log_images are commented out).
        """
        if self.downsample_cond_size > -1:
            c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
        quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
        if len(indices.shape) > 2:
            indices = indices.view(c.shape[0], -1)
        return quant_c, indices
199
+
200
+ # @torch.no_grad()
201
+ # def decode_to_img(self, index, zshape):
202
+ # index = self.permuter(index, reverse=True)
203
+ # bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
204
+ # quant_z = self.first_stage_model.quantize.get_codebook_entry(
205
+ # index.reshape(-1), shape=bhwc)
206
+ # x = self.first_stage_model.decode(quant_z)
207
+ # return x
208
+
209
    @torch.no_grad()
    def decode_to_triplane(self, index, zshape):
        """Inverse of encode_to_z: indices -> dequantized latent -> triplane.

        `zshape` is the (B, C, H, W) shape of the quantized latent; the flat
        codes are dequantized, reshaped to (B, H, W, C) and permuted back to
        channel-first before decoding.
        """
        quant_z = self.first_stage_model.vector_quantizer.dequantize(index)
        quant_z = quant_z.reshape(zshape[0], zshape[2], zshape[3], zshape[1])
        quant_z = quant_z.permute(0, 3, 1, 2)
        z = self.first_stage_model.decode(quant_z)
        return z
216
+
217
    @torch.no_grad()
    def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
        """Render a visualization dict for logging.

        Produces rendered views (uint8 numpy arrays) for: the input
        triplanes, a "half" sample (second half generated), a full sample
        from scratch, and the VQ reconstruction. Only the first N=2 batch
        elements are used.
        """
        log = dict()

        N = 2
        if lr_interface:
            x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
        else:
            x, c = self.get_xc(batch, N)
        x = x.to(device=self.device)
        # c = c.to(device=self.device)
        log["inputs"] = self.render_triplane(x, batch)

        quant_z, z_indices = self.encode_to_z(x)
        # quant_c, c_indices = self.encode_to_c(c)
        # Unconditional: fixed SOS index (vocab_size - 1), as in forward().
        c_indices = torch.zeros_like(z_indices[:, 0:1]) + self.transformer.config.vocab_size - 1

        # create a "half" sample: keep the first half of the ground-truth
        # codes and generate the rest.
        z_start_indices = z_indices[:,:z_indices.shape[1]//2]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1]-z_start_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample = self.first_stage_model.unrollout(self.decode_to_triplane(index_sample, quant_z.shape))
        log["samples_half"] = self.render_triplane(x_sample, batch)

        # sample entirely from scratch (empty prefix)
        z_start_indices = z_indices[:, :0]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample_nopix = self.first_stage_model.unrollout(self.decode_to_triplane(index_sample, quant_z.shape))
        log["samples_nopix"] = self.render_triplane(x_sample_nopix, batch)

        # # det sample
        # z_start_indices = z_indices[:, :0]
        # index_sample = self.sample(z_start_indices, c_indices,
        #                            steps=z_indices.shape[1],
        #                            sample=False,
        #                            callback=callback if callback is not None else lambda k: None)
        # x_sample_det = self.first_stage_model.unrollout(self.decode_to_triplane(index_sample, quant_z.shape))
        # log["samples_det"] = self.render_triplane(x_sample_det, batch)

        # reconstruction (decode the ground-truth codes)
        x_rec = self.first_stage_model.unrollout(self.decode_to_triplane(z_indices, quant_z.shape))
        # x_rec = self.first_stage_model.unrollout(self.first_stage_model(self.first_stage_model.rollout(x))[0])
        log["reconstructions"] = self.render_triplane(x_rec, batch)

        # NOTE: the taming-transformers conditioning visualisation
        # (objects_bbox / segmentation branches) was inherited commented-out
        # and has been dropped here; see repository history if needed.

        return log
298
+
299
    def render_triplane(self, triplane, batch):
        """Render one view per sample via the first-stage EG3D decoder.

        Returns a uint8 array of shape (B, H, W, 3); view index 1 of each
        sample's ray bundle is used for visualisation.
        """
        batch_size = triplane.shape[0]
        rgb_list = []
        for b in range(batch_size):
            rgb, cur_psnr_list = self.first_stage_model.render_triplane_eg3d_decoder(
                triplane[b:b+1], batch['batch_rays'][b], batch['img'][b],
            )
            rgb = to8b(rgb.detach().cpu().numpy())
            rgb_list.append(rgb[1])

        return np.stack(rgb_list, 0)
310
+
311
+ def get_input(self, key, batch):
312
+ x = batch[key]
313
+ # if len(x.shape) == 3:
314
+ # x = x[..., None]
315
+ # if len(x.shape) == 4:
316
+ # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
317
+ # if x.dtype == torch.double:
318
+ # x = x.float()
319
+ return x
320
+
321
+ def get_xc(self, batch, N=None):
322
+ x = self.get_input(self.first_stage_key, batch)
323
+ # c = self.get_input(self.cond_stage_key, batch)
324
+ if N is not None:
325
+ x = x[:N]
326
+ # c = c[:N]
327
+ return x, None
328
+
329
    def shared_step(self, batch, batch_idx):
        """Common train/val/test objective: cross-entropy between predicted
        logits and the ground-truth codebook indices."""
        x, c = self.get_xc(batch)
        logits, target = self(x, c)
        loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
        return loss
334
+
335
    def training_step(self, batch, batch_idx):
        """Lightning hook: log and return the shared cross-entropy loss."""
        loss = self.shared_step(batch, batch_idx)
        self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return loss
339
+
340
    def validation_step(self, batch, batch_idx):
        """Lightning hook: log the loss; on the first batch also render and
        push visualisations to the wandb run (one image per sample)."""
        loss = self.shared_step(batch, batch_idx)
        self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        if batch_idx == 0:
            imgs = self.log_images(batch)
            for i in range(imgs['inputs'].shape[0]):
                self.logger.experiment.log({
                    "val/vis/inputs": [wandb.Image(imgs['inputs'][i])],
                    "val/vis/reconstructions": [wandb.Image(imgs['reconstructions'][i])],
                    "val/vis/samples_half": [wandb.Image(imgs['samples_half'][i])],
                    "val/vis/samples_nopix": [wandb.Image(imgs['samples_nopix'][i])],
                    # "val/vis/samples_det": [wandb.Image(imgs['samples_det'][i])],
                })
        return loss
354
+
355
    def test_step(self, batch, batch_idx):
        """Lightning hook: log the loss and dump rendered visualisations
        (inputs / reconstructions / samples) as PNGs into the log dir.

        NOTE(review): temperature=1.8 is a hard-coded sampling choice here.
        """
        loss = self.shared_step(batch, batch_idx)
        self.log("test/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        imgs = self.log_images(batch, temperature=1.8)
        print("Saved to {}".format(self.logger.log_dir))
        for i in range(imgs['inputs'].shape[0]):
            imageio.imwrite(os.path.join(self.logger.log_dir, "inputs_{}_{}.png".format(batch_idx, i)), imgs['inputs'][i])
            imageio.imwrite(os.path.join(self.logger.log_dir, "reconstructions_{}_{}.png".format(batch_idx, i)), imgs['reconstructions'][i])
            imageio.imwrite(os.path.join(self.logger.log_dir, "samples_half_{}_{}.png".format(batch_idx, i)), imgs['samples_half'][i])
            imageio.imwrite(os.path.join(self.logger.log_dir, "samples_nopix_{}_{}.png".format(batch_idx, i)), imgs['samples_nopix'][i])
            # imageio.imwrite(os.path.join(self.logger.log_dir, "samples_det_{}_{}.png".format(batch_idx, i)), imgs['samples_det'][i])
        return loss
367
+
368
    def configure_optimizers(self):
        """
        Following minGPT:
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        for mn, m in self.transformer.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name

                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)

        # special case the position embedding parameter in the root GPT module as not decayed
        # (assumes self.transformer has a 'pos_emb' parameter — minGPT layout)
        no_decay.add('pos_emb')

        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                    % (str(param_dict.keys() - union_params), )

        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
        return optimizer
3DTopia/model/sv_vae_triplane.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import imageio
3
+ import numpy as np
4
+ import torch
5
+ import torchvision
6
+ import torch.nn as nn
7
+ import pytorch_lightning as pl
8
+ import wandb
9
+
10
+ import lpips
11
+ from pytorch_msssim import SSIM
12
+
13
+ from utility.initialize import instantiate_from_config
14
+
15
class VAE(pl.LightningModule):
    """Single-view triplane VAE: encodes an image to a triplane latent and
    supervises it by volume-rendering input + target views.

    Loss is a weighted sum of MSE, SSIM, LPIPS and a KL term.
    """

    def __init__(self, vae_configs, renderer_configs, lr=1e-3, weight_decay=1e-2,
                 kld_weight=1, mse_weight=1, lpips_weight=0.1, ssim_weight=0.1,
                 log_image_freq=50):
        super().__init__()
        self.save_hyperparameters()

        # Optimization / loss-weight hyperparameters.
        self.lr = lr
        self.weight_decay = weight_decay
        self.kld_weight = kld_weight
        self.mse_weight = mse_weight
        self.lpips_weight = lpips_weight
        self.ssim_weight = ssim_weight
        self.log_image_freq = log_image_freq

        self.vae = instantiate_from_config(vae_configs)
        self.renderer = instantiate_from_config(renderer_configs)

        # Perceptual metrics. NOTE(review): lpips_fn parameters are not
        # explicitly frozen here, so they are included in self.parameters()
        # passed to the optimizer — confirm this is intended.
        self.lpips_fn = lpips.LPIPS(net='alex')
        self.ssim_fn = SSIM(data_range=1, size_average=True, channel=3)

        self.triplane_render_kwargs = {
            'depth_resolution': 64,
            'disparity_space_sampling': False,
            'box_warp': 2.4,
            'depth_resolution_importance': 64,
            'clamp_mode': 'softplus',
            'white_back': True,
        }

    def forward(self, batch, is_train):
        """Encode the encoder image, then render both the input and target
        views from the same latent grid.

        Returns (rgb, depth, weights, mu, logvar, render_gt); rendered and
        gt tensors are concatenated as [input_views; target_views] along
        the batch dim.
        """
        encoder_img, input_img, input_ray_o, input_ray_d, \
            target_img, target_ray_o, target_ray_d = batch
        grid, mu, logvar = self.vae(encoder_img, is_train)

        cat_ray_o = torch.cat([input_ray_o, target_ray_o], 0)
        cat_ray_d = torch.cat([input_ray_d, target_ray_d], 0)
        # The grid is duplicated so one renderer call covers both views.
        render_out = self.renderer(torch.cat([grid, grid], 0), cat_ray_o, cat_ray_d, self.triplane_render_kwargs)
        render_gt = torch.cat([input_img, target_img], 0)

        return render_out['rgb_marched'], render_out['depth_final'], \
            render_out['weights'], mu, logvar, render_gt

    def calc_loss(self, render, mu, logvar, render_gt):
        """Weighted MSE + SSIM + LPIPS + KL objective; returns a dict with
        the total and each component."""
        mse = torch.mean((render - render_gt) ** 2)
        ssim_loss = 1 - self.ssim_fn(render, render_gt)
        # LPIPS expects inputs in [-1, 1].
        lpips_loss = self.lpips_fn((render * 2) - 1, (render_gt * 2) - 1).mean()
        kld_loss = -0.5 * torch.mean(torch.mean(1 + logvar - mu.pow(2) - logvar.exp(), 1))

        loss = self.mse_weight * mse + self.ssim_weight * ssim_loss + \
            self.lpips_weight * lpips_loss + self.kld_weight * kld_loss

        return {
            'loss': loss,
            'mse': mse,
            'ssim': ssim_loss,
            'lpips': lpips_loss,
            'kld': kld_loss,
        }

    def log_dict(self, loss_dict, prefix):
        # NOTE(review): this overrides LightningModule.log_dict with a
        # different signature (adds `prefix`); internal call sites use the
        # new form, but external callers expecting the Lightning API would
        # break — consider renaming.
        for k, v in loss_dict.items():
            self.log(prefix + k, v, on_step=True, logger=True)

    def make_grid(self, render, depth, render_gt):
        """Assemble a uint8 comparison grid (gt/render/depth for the first
        input view and the first target view)."""
        bs = render.shape[0] // 2
        grid = torchvision.utils.make_grid(
            torch.stack([render_gt[0], render_gt[bs], render[0], depth[0], render[bs], depth[bs]], 0))
        grid = (grid.detach().cpu().permute(1, 2, 0) * 255.).numpy().astype(np.uint8)
        return grid

    def training_step(self, batch, batch_idx):
        """Lightning hook: compute losses, log them, periodically log a
        visual grid to wandb."""
        render, depth, weights, mu, logvar, render_gt = self.forward(batch, True)
        loss_dict = self.calc_loss(render, mu, logvar, render_gt)
        self.log_dict(loss_dict, 'train/')
        if batch_idx % self.log_image_freq == 0:
            self.logger.experiment.log({
                'train/vis': [wandb.Image(self.make_grid(
                    render, depth, render_gt
                ))]
            })
        return loss_dict['loss']

    def validation_step(self, batch, batch_idx):
        """Lightning hook: same as training_step but with is_train=False
        and 'val/' prefixes; returns nothing."""
        render, depth, _, mu, logvar, render_gt = self.forward(batch, False)
        loss_dict = self.calc_loss(render, mu, logvar, render_gt)
        self.log_dict(loss_dict, 'val/')
        if batch_idx % self.log_image_freq == 0:
            self.logger.experiment.log({
                'val/vis': [wandb.Image(self.make_grid(
                    render, depth, render_gt
                ))]
            })

    def configure_optimizers(self):
        """AdamW over all parameters (VAE, renderer, and metric nets)."""
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        return optimizer
3DTopia/model/triplane_vae.py ADDED
The diff for this file is too large to render. See raw diff
3DTopia/model/triplane_vqvae.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import imageio
3
+ import torch
4
+ import wandb
5
+ import numpy as np
6
+ import pytorch_lightning as pl
7
+ import torch.nn.functional as F
8
+
9
+ from module.model_2d import Encoder, Decoder, DiagonalGaussianDistribution, Encoder_GroupConv, Decoder_GroupConv, Encoder_GroupConv_LateFusion, Decoder_GroupConv_LateFusion
10
+ from utility.initialize import instantiate_from_config
11
+ from utility.triplane_renderer.renderer import get_embedder, NeRF, run_network, render_path1, to8b, img2mse, mse2psnr
12
+ from utility.triplane_renderer.eg3d_renderer import Renderer_TriPlane
13
+ from module.quantise import VectorQuantiser
14
+ from module.quantize_taming import EMAVectorQuantizer, VectorQuantizer2, QuantizeEMAReset
15
+
16
class CVQVAE(pl.LightningModule):
    """VQ-VAE over rolled-out triplanes with an EG3D-style render head.

    Triplanes (B, 3*C, H, W) are rolled out to (B, C, H, 3W), encoded with
    group-conv encoder/decoder, and quantized with either a CVQ-VAE
    quantizer (`is_cvqvae=True`) or an EMA quantizer.
    """

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 learning_rate=1e-3,
                 ckpt_path=None,
                 ignore_keys=[],
                 colorize_nlabels=None,
                 monitor=None,
                 decoder_ckpt=None,
                 norm=True,
                 renderer_type='nerf',
                 is_cvqvae=False,
                 renderer_config=dict(
                     rgbnet_dim=18,
                     rgbnet_width=128,
                     viewpe=0,
                     feape=0
                 ),
                 vector_quantizer_config=dict(
                     num_embed=1024,
                     beta=0.25,
                     distance='cos',
                     anchor='closest',
                     first_batch=False,
                     contras_loss=True,
                 )
                 ):
        super().__init__()
        self.save_hyperparameters()
        self.norm = norm
        self.renderer_config = renderer_config
        self.learning_rate = learning_rate

        # Quantized latents are deterministic — no gaussian posterior.
        ddconfig['double_z'] = False
        self.encoder = Encoder_GroupConv(**ddconfig)
        self.decoder = Decoder_GroupConv(**ddconfig)

        self.lossconfig = lossconfig

        # 1x1 convs between encoder/decoder channels and the codebook dim.
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)

        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        # Frozen pre-trained EG3D triplane renderer used for PSNR eval.
        self.decoder_ckpt = decoder_ckpt
        self.renderer_type = renderer_type
        if decoder_ckpt is not None:
            self.triplane_decoder, self.triplane_render_kwargs = self.create_eg3d_decoder(decoder_ckpt)

        vector_quantizer_config['embed_dim'] = embed_dim

        if is_cvqvae:
            self.vector_quantizer = VectorQuantiser(
                **vector_quantizer_config
            )
        else:
            self.vector_quantizer = EMAVectorQuantizer(
                n_embed=vector_quantizer_config['num_embed'],
                codebook_dim = embed_dim,
                beta=vector_quantizer_config['beta']
            )
        # self.vector_quantizer = VectorQuantizer2(
        #     n_e = vector_quantizer_config['num_embed'],
        #     e_dim = embed_dim,
        #     beta = vector_quantizer_config['beta']
        # )
        # self.vector_quantizer = QuantizeEMAReset(
        #     nb_code = vector_quantizer_config['num_embed'],
        #     code_dim = embed_dim,
        #     mu = vector_quantizer_config['beta'],
        # )

        # Running latent statistics used by on_test_epoch_end.
        # NOTE(review): they are initialized here but not updated anywhere
        # in this file — presumably filled in elsewhere; verify before
        # relying on the test-epoch summary.
        self.psum = torch.zeros([1])
        self.psum_sq = torch.zeros([1])
        self.psum_min = torch.zeros([1])
        self.psum_max = torch.zeros([1])
        self.count = 0
        self.len_dset = 0
        self.latent_list = []

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
105
+
106
+ def init_from_ckpt(self, path, ignore_keys=list()):
107
+ sd = torch.load(path, map_location="cpu")["state_dict"]
108
+ keys = list(sd.keys())
109
+ for k in keys:
110
+ for ik in ignore_keys:
111
+ if k.startswith(ik):
112
+ print("Deleting key {} from state_dict.".format(k))
113
+ del sd[k]
114
+ self.load_state_dict(sd, strict=True)
115
+ print(f"Restored from {path}")
116
+
117
    def encode(self, x, rollout=False):
        """Encode a triplane to its quantized latent.

        If `rollout` is True, `x` is first converted from (B, 3C, H, W) to
        the rolled-out (B, C, H, 3W) layout the encoder expects.
        Returns (z_q, vq_loss, perplexity, encoding_indices).
        """
        if rollout:
            x = self.rollout(x)
        h = self.encoder(x)
        moments = self.quant_conv(h)
        z_q, loss, (perplexity, min_encodings, encoding_indices) = self.vector_quantizer(moments)
        return z_q, loss, perplexity, encoding_indices
124
+
125
    def decode(self, z, unrollout=False):
        """Decode a quantized latent back to a triplane.

        If `unrollout` is True, the output is converted back from the
        rolled-out (B, C, H, 3W) layout to (B, 3C, H, W).
        """
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if unrollout:
            dec = self.unrollout(dec)
        return dec
131
+
132
    def forward(self, input):
        """Full autoencode pass on an already rolled-out triplane.

        Returns (reconstruction, vq_loss, perplexity, encoding_indices).
        """
        z_q, loss, perplexity, encoding_indices = self.encode(input)
        dec = self.decode(z_q)
        return dec, loss, perplexity, encoding_indices
136
+
137
+ def rollout(self, triplane):
138
+ res = triplane.shape[-1]
139
+ ch = triplane.shape[1]
140
+ triplane = triplane.reshape(-1, 3, ch//3, res, res).permute(0, 2, 3, 1, 4).reshape(-1, ch//3, res, 3 * res)
141
+ return triplane
142
+
143
    def to3daware(self, triplane):
        """Augment each plane of a rolled-out triplane with axis-pooled
        summaries of the other two planes (3x the channels).

        Input/output layout is rolled-out: (B, C, H, 3W) -> (B, 3C, H, 3W).
        Each plane is concatenated with a max-pool of each sibling plane
        broadcast along the shared axis; the torch.flip calls orient those
        summaries to match the plane's axis convention.
        NOTE(review): the specific flip/transpose choices are assumed to
        match the EG3D plane conventions — confirm against the renderer.
        """
        res = triplane.shape[-2]
        plane1 = triplane[..., :res]
        plane2 = triplane[..., res:2*res]
        plane3 = triplane[..., 2*res:3*res]

        # Pool over rows (x) or columns (y), then broadcast back to res x res.
        x_mp = torch.nn.MaxPool2d((res, 1))
        y_mp = torch.nn.MaxPool2d((1, res))
        x_mp_rep = lambda i: x_mp(i).repeat(1, 1, res, 1).permute(0, 1, 3, 2)
        y_mp_rep = lambda i: y_mp(i).repeat(1, 1, 1, res).permute(0, 1, 3, 2)
        # for plane1
        plane21 = x_mp_rep(plane2)
        plane31 = torch.flip(y_mp_rep(plane3), (3,))
        new_plane1 = torch.cat([plane1, plane21, plane31], 1)
        # for plane2
        plane12 = y_mp_rep(plane1)
        plane32 = x_mp_rep(plane3)
        new_plane2 = torch.cat([plane2, plane12, plane32], 1)
        # for plane3
        plane13 = torch.flip(x_mp_rep(plane1), (2,))
        plane23 = y_mp_rep(plane2)
        new_plane3 = torch.cat([plane3, plane13, plane23], 1)

        new_plane = torch.cat([new_plane1, new_plane2, new_plane3], -1).contiguous()
        return new_plane
168
+
169
+ def unrollout(self, triplane):
170
+ res = triplane.shape[-2]
171
+ ch = 3 * triplane.shape[1]
172
+ triplane = triplane.reshape(-1, ch//3, res, 3, res).permute(0, 3, 1, 2, 4).reshape(-1, ch, res, res)
173
+ return triplane
174
+
175
    def training_step(self, batch, batch_idx):
        """Lightning hook: autoencode the rolled-out triplane, log the loss
        components and codebook perplexity, return the total loss."""
        inputs = self.rollout(batch['triplane'])
        reconstructions, vq_loss, perplexity, encoding_indices = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, vq_loss, prefix='train/', batch=batch)
        log_dict_ae['train/perplexity'] = perplexity
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
        return aeloss
182
+
183
    def validation_step(self, batch, batch_idx):
        """Lightning hook: autoencode, log losses, then render both the
        original and reconstructed triplanes to compute three PSNRs
        (rec-vs-gt, input-vs-gt, input-vs-rec); a few comparison images go
        to wandb."""
        inputs = self.rollout(batch['triplane'])
        reconstructions, vq_loss, perplexity, encoding_indices = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, vq_loss, prefix='val/', batch=None)
        log_dict_ae['val/perplexity'] = perplexity
        self.log_dict(log_dict_ae)

        reconstructions = self.unrollout(reconstructions)
        psnr_list = [] # between rec and gt
        psnr_input_list = [] # between input and gt
        psnr_rec_list = [] # between input and rec
        batch_size = inputs.shape[0]
        for b in range(batch_size):
            rgb_input, cur_psnr_list_input = self.render_triplane_eg3d_decoder(
                batch['triplane_ori'][b:b+1], batch['batch_rays'][b], batch['img'][b],
            )
            rgb, cur_psnr_list = self.render_triplane_eg3d_decoder(
                reconstructions[b:b+1], batch['batch_rays'][b], batch['img'][b],
            )

            cur_psnr_list_rec = []
            for i in range(rgb.shape[0]):
                cur_psnr_list_rec.append(mse2psnr(img2mse(rgb_input[i], rgb[i])))

            rgb_input = to8b(rgb_input.detach().cpu().numpy())
            rgb_gt = to8b(batch['img'][b].detach().cpu().numpy())
            rgb = to8b(rgb.detach().cpu().numpy())

            # Log a side-by-side (gt | input render | rec render) for a
            # subset of samples on the first few batches.
            if b % 4 == 0 and batch_idx < 10:
                rgb_all = np.concatenate([rgb_gt[1], rgb_input[1], rgb[1]], 1)
                self.logger.experiment.log({
                    "val/vis": [wandb.Image(rgb_all)],
                })

            psnr_list += cur_psnr_list
            psnr_input_list += cur_psnr_list_input
            psnr_rec_list += cur_psnr_list_rec

        self.log("val/psnr_input_gt", torch.Tensor(psnr_input_list).mean(), prog_bar=True)
        self.log("val/psnr_input_rec", torch.Tensor(psnr_rec_list).mean(), prog_bar=True)
        self.log("val/psnr_rec_gt", torch.Tensor(psnr_list).mean(), prog_bar=True)

        # NOTE(review): returns the bound log_dict method object, not a
        # dict — Lightning ignores validation_step returns, so harmless,
        # but probably unintended.
        return self.log_dict
226
+
227
    def to_rgb(self, plane):
        """Project an arbitrary-channel plane to a uint8 RGB image via a
        random fixed 1x1 conv, min-max normalized to [0, 255].

        The projection weight is created lazily on first call (as a plain
        attribute, not a registered buffer, unless set in __init__).
        """
        x = plane.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = torch.nn.functional.conv2d(x, weight=self.colorize)
        x = ((x - x.min()) / (x.max() - x.min()) * 255.).permute(0, 2, 3, 1).detach().cpu().numpy().astype(np.uint8)
        return x
234
+
235
    def to_rgb_triplane(self, plane):
        """Same random-projection visualisation as to_rgb, but with its own
        lazily-created weight so triplane channel counts don't clash."""
        x = plane.float()
        if not hasattr(self, "colorize_triplane"):
            self.colorize_triplane = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = torch.nn.functional.conv2d(x, weight=self.colorize_triplane)
        x = ((x - x.min()) / (x.max() - x.min()) * 255.).permute(0, 2, 3, 1).detach().cpu().numpy().astype(np.uint8)
        return x
242
+
243
    def to_rgb_3daware(self, plane):
        """Random-projection visualisation for 3D-aware (3x-channel)
        planes; uses a separate lazily-created weight."""
        x = plane.float()
        if not hasattr(self, "colorize_3daware"):
            self.colorize_3daware = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = torch.nn.functional.conv2d(x, weight=self.colorize_3daware)
        x = ((x - x.min()) / (x.max() - x.min()) * 255.).permute(0, 2, 3, 1).detach().cpu().numpy().astype(np.uint8)
        return x
250
+
251
+ def test_step(self, batch, batch_idx):
252
+ inputs = self.rollout(batch['triplane'])
253
+ reconstructions, vq_loss, perplexity, encoding_indices = self(inputs)
254
+ aeloss, log_dict_ae = self.loss(inputs, reconstructions, vq_loss, prefix='test/', batch=None)
255
+ log_dict_ae['test/perplexity'] = perplexity
256
+ self.log_dict(log_dict_ae)
257
+
258
+ batch_size = inputs.shape[0]
259
+ psnr_list = [] # between rec and gt
260
+ psnr_input_list = [] # between input and gt
261
+ psnr_rec_list = [] # between input and rec
262
+
263
+ colorize_triplane_input = self.to_rgb_triplane(inputs)[0]
264
+ colorize_triplane_output = self.to_rgb_triplane(reconstructions)[0]
265
+
266
+ reconstructions = self.unrollout(reconstructions)
267
+
268
+ if self.norm:
269
+ assert NotImplementedError
270
+ else:
271
+ reconstructions_unnormalize = reconstructions
272
+
273
+ if True:
274
+ for b in range(batch_size):
275
+ rgb_input, cur_psnr_list_input = self.render_triplane_eg3d_decoder(
276
+ batch['triplane_ori'][b:b+1], batch['batch_rays'][b], batch['img'][b],
277
+ )
278
+ rgb, cur_psnr_list = self.render_triplane_eg3d_decoder(
279
+ reconstructions_unnormalize[b:b+1], batch['batch_rays'][b], batch['img'][b],
280
+ )
281
+
282
+ cur_psnr_list_rec = []
283
+ for i in range(rgb.shape[0]):
284
+ cur_psnr_list_rec.append(mse2psnr(img2mse(rgb_input[i], rgb[i])))
285
+
286
+ rgb_input = to8b(rgb_input.detach().cpu().numpy())
287
+ rgb_gt = to8b(batch['img'][b].detach().cpu().numpy())
288
+ rgb = to8b(rgb.detach().cpu().numpy())
289
+
290
+ if batch_idx < 10:
291
+ imageio.imwrite(os.path.join(self.logger.log_dir, "{}_{}_input.png".format(batch_idx, b)), rgb_input[1])
292
+ imageio.imwrite(os.path.join(self.logger.log_dir, "{}_{}_rec.png".format(batch_idx, b)), rgb[1])
293
+ imageio.imwrite(os.path.join(self.logger.log_dir, "{}_{}_gt.png".format(batch_idx, b)), rgb_gt[1])
294
+
295
+ psnr_list += cur_psnr_list
296
+ psnr_input_list += cur_psnr_list_input
297
+ psnr_rec_list += cur_psnr_list_rec
298
+
299
+ self.log("test/psnr_input_gt", torch.Tensor(psnr_input_list).mean(), prog_bar=True)
300
+ self.log("test/psnr_input_rec", torch.Tensor(psnr_rec_list).mean(), prog_bar=True)
301
+ self.log("test/psnr_rec_gt", torch.Tensor(psnr_list).mean(), prog_bar=True)
302
+
303
    def on_test_epoch_end(self):
        """Print running latent statistics (mean/std/min/max and robust IQR
        scale) accumulated over the test epoch.

        NOTE(review): self.psum/self.count/self.latent_list are initialized
        in __init__ but not updated anywhere in this file; if they are not
        filled elsewhere, the divisions below hit count == 0 and
        np.concatenate([]) raises — confirm the accumulation path.
        """
        mean = self.psum / self.count
        mean_min = self.psum_min / self.len_dset
        mean_max = self.psum_max / self.len_dset
        var = (self.psum_sq / self.count) - (mean ** 2)
        std = torch.sqrt(var)

        print("mean min: {}".format(mean_min))
        print("mean max: {}".format(mean_max))
        print("mean: {}".format(mean))
        print("std: {}".format(std))

        latent = np.concatenate(self.latent_list)
        # Robust scale estimate: normalized inter-quartile range
        # (IQR * 0.7413 approximates one standard deviation for a Gaussian).
        q75, q25 = np.percentile(latent.reshape(-1), [75 ,25])
        median = np.median(latent.reshape(-1))
        iqr = q75 - q25
        norm_iqr = iqr * 0.7413
        print("Norm IQR: {}".format(norm_iqr))
        print("Inverse Norm IQR: {}".format(1/norm_iqr))
        print("Median: {}".format(median))
323
+
324
+ def loss(self, inputs, reconstructions, vq_loss, prefix, batch=None):
325
+ reconstructions = reconstructions.contiguous()
326
+ rec_loss = F.mse_loss(inputs.contiguous(), reconstructions)
327
+ loss = self.lossconfig.rec_weight * rec_loss + self.lossconfig.vq_weight * vq_loss
328
+
329
+ ret_dict = {
330
+ prefix+'mean_rec_loss': torch.abs(inputs.contiguous() - reconstructions.contiguous()).mean().detach(),
331
+ prefix+'rec_loss': rec_loss,
332
+ prefix+'vq_loss': vq_loss,
333
+ prefix+'loss': loss,
334
+ }
335
+
336
+ render_weight = self.lossconfig.get("render_weight", 0)
337
+ tv_weight = self.lossconfig.get("tv_weight", 0)
338
+ l1_weight = self.lossconfig.get("l1_weight", 0)
339
+ latent_tv_weight = self.lossconfig.get("latent_tv_weight", 0)
340
+ latent_l1_weight = self.lossconfig.get("latent_l1_weight", 0)
341
+
342
+ triplane_rec = self.unrollout(reconstructions)
343
+ if render_weight > 0 and batch is not None:
344
+ rgb_rendered, target = self.render_triplane_eg3d_decoder_sample_pixel(triplane_rec, batch['batch_rays'], batch['img'])
345
+ render_loss = F.mse(rgb_rendered, target)
346
+ loss += render_weight * render_loss
347
+ ret_dict[prefix + 'render_loss'] = render_loss
348
+ if tv_weight > 0:
349
+ tvloss_y = torch.abs(triplane_rec[:, :, :-1] - triplane_rec[:, :, 1:]).mean()
350
+ tvloss_x = torch.abs(triplane_rec[:, :, :, :-1] - triplane_rec[:, :, :, 1:]).mean()
351
+ tvloss = tvloss_y + tvloss_x
352
+ loss += tv_weight * tvloss
353
+ ret_dict[prefix + 'tv_loss'] = tvloss
354
+ if l1_weight > 0:
355
+ l1 = (triplane_rec ** 2).mean()
356
+ loss += l1_weight * l1
357
+ ret_dict[prefix + 'l1_loss'] = l1
358
+
359
+ ret_dict[prefix+'loss'] = loss
360
+
361
+ return loss, ret_dict
362
+
363
    def create_eg3d_decoder(self, decoder_ckpt):
        """Build the EG3D triplane renderer and load pre-trained weights.

        The checkpoint keys are stripped of their first dotted component
        (assumed wrapper-module prefix — confirm against the training
        checkpoint layout). Returns (renderer, render_kwargs).
        """
        triplane_decoder = Renderer_TriPlane(**self.renderer_config)
        pretrain_pth = torch.load(decoder_ckpt, map_location='cpu')
        pretrain_pth = {
            '.'.join(k.split('.')[1:]): v for k, v in pretrain_pth.items()
        }
        # import pdb; pdb.set_trace()
        triplane_decoder.load_state_dict(pretrain_pth)
        render_kwargs = {
            'depth_resolution': 128,
            'disparity_space_sampling': False,
            'box_warp': 2.4,
            'depth_resolution_importance': 128,
            'clamp_mode': 'softplus',
            'white_back': True,
            'det': True
        }
        return triplane_decoder, render_kwargs
381
+
382
    def render_triplane_eg3d_decoder(self, triplane, batch_rays, target):
        """Render every view of one triplane and compute per-view PSNR.

        Args:
            triplane: single unrolled triplane, batch dim 1.
            batch_rays: per-view (origin, direction) ray bundles; index 0 is
                origins, index 1 is directions.
            target: ground-truth images, one per view.

        Returns (rendered_images, psnr_list); rendering is done view by
        view under no_grad.
        """
        ray_o = batch_rays[:, 0]
        ray_d = batch_rays[:, 1]
        psnr_list = []
        rec_img_list = []
        res = triplane.shape[-2]
        for i in range(ray_o.shape[0]):
            with torch.no_grad():
                render_out = self.triplane_decoder(triplane.reshape(1, 3, -1, res, res),
                                                   ray_o[i:i+1], ray_d[i:i+1], self.triplane_render_kwargs, whole_img=True, tvloss=False)
            rec_img = render_out['rgb_marched'].permute(0, 2, 3, 1)
            psnr = mse2psnr(img2mse(rec_img[0], target[i]))
            psnr_list.append(psnr)
            rec_img_list.append(rec_img)
        return torch.cat(rec_img_list, 0), psnr_list
397
+
398
    def render_triplane_eg3d_decoder_sample_pixel(self, triplane, batch_rays, target, sample_num=1024):
        """Render a random subset of pixels (for the render loss).

        A single shared set of `sample_num` ray indices is drawn (with
        replacement, via randint) and used for every sample in the batch.
        Requires exactly one view per sample (batch_rays.shape[1] == 1).
        Returns (rendered_rgb, matching_target_pixels).
        """
        assert batch_rays.shape[1] == 1
        sel = torch.randint(batch_rays.shape[-2], [sample_num])
        ray_o = batch_rays[:, 0, 0, sel]
        ray_d = batch_rays[:, 0, 1, sel]
        res = triplane.shape[-2]
        render_out = self.triplane_decoder(triplane.reshape(triplane.shape[0], 3, -1, res, res),
                                           ray_o, ray_d, self.triplane_render_kwargs, whole_img=False, tvloss=False)
        rec_img = render_out['rgb_marched']
        target = target.reshape(triplane.shape[0], -1, 3)[:, sel, :]
        return rec_img, target
409
+
410
    def configure_optimizers(self):
        """Adam over the autoencoder parts and the quantizer; the frozen
        EG3D triplane_decoder is deliberately excluded."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters())+
                                  list(self.vector_quantizer.parameters()),
                                  lr=lr)
        return opt_ae
3DTopia/module/model_2d.py ADDED
@@ -0,0 +1,2206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pytorch_diffusion + derived encoder decoder
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+ from einops import rearrange
7
+
8
+ from utility.initialize import instantiate_from_config
9
+ from .nn_2d import LinearAttention
10
+
11
+
12
def get_timestep_embedding(timesteps, embedding_dim):
    """Build sinusoidal timestep embeddings (DDPM / tensor2tensor style).

    Matches the Fairseq implementation used in Denoising Diffusion
    Probabilistic Models; differs slightly from Section 3.5 of
    "Attention Is All You Need".

    Args:
        timesteps: 1-D tensor of timestep indices.
        embedding_dim: output embedding width; zero-padded when odd.

    Returns:
        Tensor of shape (len(timesteps), embedding_dim).
    """
    assert timesteps.ndim == 1

    half = embedding_dim // 2
    log_scale = math.log(10000) / (half - 1)
    freqs = torch.exp(-log_scale * torch.arange(half, dtype=torch.float32))
    freqs = freqs.to(device=timesteps.device)
    args = timesteps.float().unsqueeze(1) * freqs.unsqueeze(0)
    emb = torch.cat((torch.sin(args), torch.cos(args)), dim=1)
    if embedding_dim % 2 == 1:
        # odd widths: zero-pad the final column
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
31
+
32
+
33
def nonlinearity(x):
    """Swish / SiLU activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
36
+
37
+
38
def Normalize(in_channels, num_groups=32):
    """Return a GroupNorm layer with the conventions used throughout this file."""
    return torch.nn.GroupNorm(
        num_groups=num_groups,
        num_channels=in_channels,
        eps=1e-6,
        affine=True,
    )
40
+
41
+
42
class Upsample(nn.Module):
    """2x nearest-neighbour spatial upsampling, optionally followed by a 3x3 conv."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        out = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(out) if self.with_conv else out
58
+
59
+
60
class Downsample(nn.Module):
    """2x spatial downsampling via a strided 3x3 conv or 2x2 average pooling."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            # torch convs have no asymmetric padding, so pad manually in forward()
            self.conv = torch.nn.Conv2d(
                in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        # pad right/bottom by one so the stride-2 conv halves the resolution
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
80
+
81
+
82
class ResnetBlock(nn.Module):
    """Diffusion-UNet residual block.

    Two (GroupNorm -> swish -> 3x3 conv) stages with optional timestep
    embedding injected between them; the input is added back through a
    learned shortcut when channel counts differ.
    """

    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            # projects the timestep embedding onto this block's channel count
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            # channel-matching residual path: full 3x3 conv, or a cheap
            # 1x1 ("network-in-network") projection
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)

    def forward(self, x, temb):
        # temb may be None (e.g. VAE encoder/decoder, where temb_channels=0)
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            # broadcast the projected embedding over spatial dims
            h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x+h
142
+
143
+
144
class LinAttnBlock(LinearAttention):
    """Single-head linear attention wrapped to match AttnBlock's constructor."""

    def __init__(self, in_channels):
        super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
148
+
149
+
150
class AttnBlock(nn.Module):
    """Vanilla single-head self-attention over spatial positions.

    Q/K/V and output projections are 1x1 convs; attention is computed over
    the flattened h*w positions with 1/sqrt(C) scaling, and the result is
    added residually to the input.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)


    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b,c,h,w = q.shape
        q = q.reshape(b,c,h*w)
        q = q.permute(0,2,1)   # b,hw,c
        k = k.reshape(b,c,h*w) # b,c,hw
        w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))  # scaled dot-product
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b,c,h*w)
        w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b,c,h,w)

        h_ = self.proj_out(h_)

        # residual connection
        return x+h_
203
+
204
+
205
def make_attn(in_channels, attn_type="vanilla"):
    """Factory for the attention variants used by the 2D backbones.

    Supported types: "vanilla" (AttnBlock), "vanilla_groupconv",
    "crossattention" (triplane cross-attention, 8 heads), "none"
    (identity), and anything else falls through to linear attention.
    """
    known_types = ["vanilla", "linear", "none", "vanilla_groupconv", "crossattention"]
    assert attn_type in known_types, f'attn_type {attn_type} unknown'
    # print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    if attn_type == 'vanilla_groupconv':
        return AttnBlock_GroupConv(in_channels)
    if attn_type == 'crossattention':
        num_heads = 8
        return TriplaneAttentionBlock(in_channels, num_heads, in_channels // num_heads, True)
    if attn_type == "none":
        return nn.Identity(in_channels)
    return LinAttnBlock(in_channels)
219
+
220
+
221
class Model(nn.Module):
    """Full diffusion UNet with optional timestep conditioning.

    Symmetric encoder/decoder with skip connections: per-resolution stacks of
    ResnetBlocks (with attention at the resolutions listed in
    ``attn_resolutions``), a middle block of Resnet->Attn->Resnet, and a
    mirrored upsampling path that consumes the stored skip activations.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding: two linear layers over the sinusoidal embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            skip_in = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    # the extra block consumes the pre-downsample skip
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x, t=None, context=None):
        """Denoise ``x`` at timesteps ``t``; optional aligned ``context`` is
        concatenated along channels before the first conv."""
        #assert x.shape[2] == x.shape[3] == self.resolution
        if context is not None:
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None

        # downsampling: keep every activation for the skip connections
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling: pop skips in reverse order
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def get_last_layer(self):
        """Weight of the final conv (used e.g. for adaptive loss weighting)."""
        return self.conv_out.weight
371
+
372
+
373
class Encoder(nn.Module):
    """Downsampling half of the autoencoder (no timestep conditioning).

    Mirrors ``Model``'s encoder path; produces a ``z_channels`` feature map
    (doubled when ``double_z`` to hold mean+logvar of a Gaussian posterior).
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
                 **ignore_kwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0  # no timestep embedding in the autoencoder
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        """Encode ``x`` to the latent feature map."""
        # timestep embedding (unused here)
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
465
+
466
+
467
class Decoder(nn.Module):
    """Upsampling half of the autoencoder: latent ``z`` back to image space.

    Mirror of ``Encoder`` without skip connections. ``give_pre_end`` returns
    the feature map before the final norm/conv; ``tanh_out`` squashes the
    output to [-1, 1].
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla", **ignorekwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0  # no timestep embedding in the autoencoder
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        # print("Working with z of shape {} = {} dimensions.".format(
        #     self.z_shape, np.prod(self.z_shape)))

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, z):
        """Decode latent ``z`` to an image-space tensor."""
        #assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding (unused here)
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h
574
+
575
+
576
class SimpleDecoder(nn.Module):
    """Small fixed decoder: 1x1 conv, three ResnetBlocks (up to 4x channels
    and back), 1x1 conv, one 2x upsample, then norm/swish/3x3 conv out."""

    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
                                    ResnetBlock(in_channels=in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=2 * in_channels,
                                                out_channels=4 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=4 * in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    nn.Conv2d(2*in_channels, in_channels, 1),
                                    Upsample(in_channels, with_conv=True)])
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        for i, layer in enumerate(self.model):
            # indices 1..3 are ResnetBlocks and take a (here None) temb
            if i in [1,2,3]:
                x = layer(x, None)
            else:
                x = layer(x)

        h = self.norm_out(x)
        h = nonlinearity(h)
        x = self.conv_out(h)
        return x
610
+
611
+
612
class UpsampleDecoder(nn.Module):
    """Decoder of stacked ResnetBlocks with a 2x upsample between levels
    (no attention, no skips)."""

    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        # upsampling
        self.temb_ch = 0  # no timestep conditioning
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            res_block = []
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                res_block.append(ResnetBlock(in_channels=block_in,
                                             out_channels=block_out,
                                             temb_channels=self.temb_ch,
                                             dropout=dropout))
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(res_block))
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # upsampling
        h = x
        for k, i_level in enumerate(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.res_blocks[i_level][i_block](h, None)
            if i_level != self.num_resolutions - 1:
                h = self.upsample_blocks[k](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
658
+
659
+
660
class LatentRescaler(nn.Module):
    """Rescale a latent by an arbitrary ``factor``.

    Structure: conv-in, ``depth`` ResnetBlocks, bilinear-free interpolate to
    round(size * factor), attention, ``depth`` more ResnetBlocks, 1x1 conv-out.
    """

    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
        super().__init__()
        # residual block, interpolate, residual block
        self.factor = factor
        self.conv_in = nn.Conv2d(in_channels,
                                 mid_channels,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
                                                     out_channels=mid_channels,
                                                     temb_channels=0,
                                                     dropout=0.0) for _ in range(depth)])
        self.attn = AttnBlock(mid_channels)
        self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
                                                     out_channels=mid_channels,
                                                     temb_channels=0,
                                                     dropout=0.0) for _ in range(depth)])

        self.conv_out = nn.Conv2d(mid_channels,
                                  out_channels,
                                  kernel_size=1,
                                  )

    def forward(self, x):
        x = self.conv_in(x)
        for block in self.res_block1:
            x = block(x, None)
        # default (nearest) interpolation to the rescaled spatial size
        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
        x = self.attn(x)
        for block in self.res_block2:
            x = block(x, None)
        x = self.conv_out(x)
        return x
695
+
696
+
697
class MergedRescaleEncoder(nn.Module):
    """Encoder followed by a LatentRescaler, fused into one module."""

    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        intermediate_chn = ch * ch_mult[-1]
        # double_z=False: plain latents (no Gaussian mean/logvar split)
        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
                               out_ch=None)
        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)

    def forward(self, x):
        x = self.encoder(x)
        x = self.rescaler(x)
        return x
714
+
715
+
716
class MergedRescaleDecoder(nn.Module):
    """LatentRescaler followed by a Decoder, fused into one module."""

    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        tmp_chn = z_channels*ch_mult[-1]
        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
                               ch_mult=ch_mult, resolution=resolution, ch=ch)
        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
                                       out_channels=tmp_chn, depth=rescale_module_depth)

    def forward(self, x):
        # rescale first, then decode
        x = self.rescaler(x)
        x = self.decoder(x)
        return x
731
+
732
+
733
class Upsampler(nn.Module):
    """Rescale then decode to grow a feature map from ``in_size`` to ``out_size``."""

    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
        super().__init__()
        assert out_size >= in_size
        num_blocks = int(np.log2(out_size//in_size))+1
        # NOTE(review): factor 1 + (out_size % in_size) only handles the
        # non-divisible remainder; assumes out_size is (near-)multiple of
        # in_size — confirm intended semantics.
        factor_up = 1.+ (out_size % in_size)
        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
                                       out_channels=in_channels)
        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
                               attn_resolutions=[], in_channels=None, ch=in_channels,
                               ch_mult=[ch_mult for _ in range(num_blocks)])

    def forward(self, x):
        x = self.rescaler(x)
        x = self.decoder(x)
        return x
750
+
751
+
752
class Resize(nn.Module):
    """Scale-factor resize via ``F.interpolate``.

    Only the fixed (non-learned) path is implemented; constructing with
    ``learned=True`` raises ``NotImplementedError``.

    Args:
        in_channels: only relevant to the (unimplemented) learned path.
        learned: request learned resampling (not implemented).
        mode: interpolation mode passed to ``F.interpolate``.
    """

    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            # Fixed: original read `self.__class__.__name` (missing trailing
            # underscores), which raised AttributeError instead of printing.
            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
            raise NotImplementedError()
            # NOTE: dead code below kept from the original implementation;
            # unreachable after the raise above.
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)

    def forward(self, x, scale_factor=1.0):
        """Return ``x`` unchanged for scale 1.0, else interpolate by the factor."""
        if scale_factor == 1.0:
            return x
        return torch.nn.functional.interpolate(
            x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
774
+
775
class FirstStagePostProcessor(nn.Module):
    """Encode inputs with a frozen pretrained first-stage model, then project
    and downsample the latents through a small Resnet/Downsample stack.

    Either ``pretrained_model`` (an instantiated module) or
    ``pretrained_config`` (instantiated here and frozen) must be given.
    """

    def __init__(self, ch_mult:list, in_channels,
                 pretrained_model:nn.Module=None,
                 reshape=False,
                 n_channels=None,
                 dropout=0.,
                 pretrained_config=None):
        super().__init__()
        if pretrained_config is None:
            assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.pretrained_model = pretrained_model
        else:
            assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.instantiate_pretrained(pretrained_config)

        self.do_reshape = reshape

        if n_channels is None:
            # default to the pretrained encoder's base channel count
            n_channels = self.pretrained_model.encoder.ch

        self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
        self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
                              stride=1,padding=1)

        blocks = []
        downs = []
        ch_in = n_channels
        for m in ch_mult:
            blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
            ch_in = m * n_channels
            downs.append(Downsample(ch_in, with_conv=False))

        self.model = nn.ModuleList(blocks)
        self.downsampler = nn.ModuleList(downs)


    def instantiate_pretrained(self, config):
        """Build the pretrained model from config, set eval mode and freeze it."""
        model = instantiate_from_config(config)
        self.pretrained_model = model.eval()
        # self.pretrained_model.train = False
        for param in self.pretrained_model.parameters():
            param.requires_grad = False


    @torch.no_grad()
    def encode_with_pretrained(self,x):
        """Encode ``x``; collapse a Gaussian posterior to its mode."""
        c = self.pretrained_model.encode(x)
        if isinstance(c, DiagonalGaussianDistribution):
            c = c.mode()
        return c

    def forward(self,x):
        z_fs = self.encode_with_pretrained(x)
        z = self.proj_norm(z_fs)
        z = self.proj(z)
        z = nonlinearity(z)

        # alternate Resnet and Downsample stages
        for submodel, downmodel in zip(self.model,self.downsampler):
            z = submodel(z,temb=None)
            z = downmodel(z)

        if self.do_reshape:
            # flatten spatial dims into a token axis
            z = rearrange(z,'b c h w -> b (h w) c')
        return z
840
+
841
+
842
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian over image-shaped tensors.

    ``parameters`` packs mean and log-variance along dim 1; log-variance is
    clamped to [-30, 20] for numerical stability. In deterministic mode the
    variance collapses to zero so sample() == mode().
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            zeros = torch.zeros_like(self.mean).to(device=self.parameters.device)
            self.var = self.std = zeros

    def sample(self):
        """Draw a reparameterised sample: mean + std * eps."""
        eps = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * eps

    def kl(self, other=None):
        """KL divergence to a standard normal, or to ``other`` if given."""
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=[1, 2, 3])

    def nll(self, sample, dims=[1,2,3]):
        """Negative log-likelihood of ``sample`` under this Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        """Distribution mode (== mean for a Gaussian)."""
        return self.mean
881
+
882
+
883
class ResnetBlock_GroupConv(nn.Module):
    """ResNet block for stacked triplane tensors.

    All 3x3 convolutions use ``groups=3`` so each of the three planes
    (stacked along the channel dim as 3*C channels) is processed by its own
    filter bank — no cross-plane mixing happens inside this block.
    """

    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels * 3, 32 * 3)
        self.conv1 = torch.nn.Conv2d(in_channels * 3,
                                     out_channels * 3,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     groups=3)
        if temb_channels > 0:
            # NOTE(review): projects to out_channels, not out_channels * 3,
            # which would not match the grouped channel layout of `h`.
            # Dead in practice: forward() asserts temb is None — confirm.
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels * 3, 32 * 3)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels * 3,
                                     out_channels * 3,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     groups=3)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels * 3,
                                                     out_channels * 3,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1,
                                                     groups=3)
            else:
                # 1x1 "network-in-network" projection for the skip branch.
                self.nin_shortcut = torch.nn.Conv2d(in_channels * 3,
                                                    out_channels * 3,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0,
                                                    groups=3)

    def forward(self, x, temb):
        # norm -> nonlinearity -> conv, twice, with dropout in between.
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        # Timestep embeddings are not supported by this block.
        assert temb is None
        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        # Project the residual branch when channel counts differ.
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x+h
948
+
949
+
950
def rollout(triplane):
    """Lay the three stacked planes side by side along the width axis.

    Input:  (B, 3*C, H, H) — planes stacked along the channel dim.
    Output: (B, C, H, 3*H) — plane p occupies width slice [p*H, (p+1)*H).
    """
    res = triplane.shape[-1]
    ch_per_plane = triplane.shape[1] // 3
    planes = triplane.reshape(-1, 3, ch_per_plane, res, res)
    return planes.permute(0, 2, 3, 1, 4).reshape(-1, ch_per_plane, res, 3 * res)
955
+
956
def unrollout(triplane):
    """Inverse of ``rollout``: stack side-by-side planes along channels.

    Input:  (B, C, H, 3*H) — plane p in width slice [p*H, (p+1)*H).
    Output: (B, 3*C, H, H) — plane p in channel slice [p*C, (p+1)*C).
    """
    res = triplane.shape[-2]
    ch_per_plane = triplane.shape[1]
    split = triplane.reshape(-1, ch_per_plane, res, 3, res)
    return split.permute(0, 3, 1, 2, 4).reshape(-1, 3 * ch_per_plane, res, res)
961
+
962
class Upsample_GroupConv(nn.Module):
    """2x nearest-neighbour upsampling for stacked triplanes, optionally
    followed by a per-plane (``groups=3``) 3x3 convolution."""

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            # One independent filter bank per plane.
            self.conv = torch.nn.Conv2d(
                in_channels * 3, in_channels * 3,
                kernel_size=3, stride=1, padding=1, groups=3)

    def forward(self, x):
        out = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(out) if self.with_conv else out
979
+
980
+
981
class Downsample_GroupConv(nn.Module):
    """2x downsampling for stacked triplanes.

    ``with_conv=True`` uses a strided per-plane (``groups=3``) 3x3 conv with
    manual right/bottom padding; ``with_conv=False`` uses 2x2 average pooling.
    """

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if with_conv:
            # Conv2d has no asymmetric padding, so padding happens in forward.
            self.conv = torch.nn.Conv2d(
                in_channels * 3, in_channels * 3,
                kernel_size=3, stride=2, padding=0, groups=3)

    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        # Pad one pixel on the right and bottom only, then strided conv.
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
1002
+
1003
class AttnBlock_GroupConv(nn.Module):
    """Self-attention over a rolled-out triplane.

    The stacked input (B, 3*C, H, H) is first rolled out to (B, C, H, 3*H)
    so one spatial attention spans all three planes jointly, then rolled
    back to the stacked layout before returning.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        # 1x1 convs act as per-position linear projections for q, k, v.
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)


    def forward(self, x, temp=None):
        # `temp` is accepted for interface compatibility but never used.
        x = rollout(x)
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute scaled dot-product attention over all 3*H*H positions
        b,c,h,w = q.shape
        q = q.reshape(b,c,h*w)
        q = q.permute(0,2,1)   # b,hw,c
        k = k.reshape(b,c,h*w) # b,c,hw
        w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b,c,h*w)
        w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b,c,h,w)

        h_ = self.proj_out(h_)

        # Residual connection, then back to the stacked triplane layout.
        return unrollout(x+h_)
1057
+
1058
+
1059
+ from torch import nn, einsum
1060
+ from inspect import isfunction
1061
+ from einops import rearrange, repeat
1062
+
1063
def exists(val):
    """Return True when *val* is anything other than None."""
    return not (val is None)
1065
+
1066
def default(val, d):
    """Return *val* if it is not None, otherwise the fallback *d*.

    A plain-function fallback is treated as a lazy factory and called;
    any other value (including classes and builtins) is returned as-is.
    """
    if val is not None:
        return val
    if isfunction(d):
        return d()
    return d
1070
+
1071
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    # Guard clause: without checkpointing, just call through.
    if not flag:
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
1086
+
1087
class CheckpointFunction(torch.autograd.Function):
    """Gradient checkpointing: forward runs under no_grad, and the function
    is re-executed during backward to recover intermediate activations."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # The first `length` args are the tensor inputs; the remainder are
        # parameters the function depends on (saved so their gradients are
        # produced in backward).
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])

        # No graph is built here; activations are recomputed in backward.
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The two leading Nones match the non-tensor forward args
        # (run_function, length).
        return (None, None) + input_grads
1117
+
1118
class CrossAttention(nn.Module):
    """Multi-head cross-attention over channel-first sequences.

    Unlike the usual (b, n, c) convention, this variant accepts and returns
    tensors shaped (b, c, n): both `x` and `context` are permuted on entry
    and the result is permuted back before returning.

    :param query_dim: channel dim of the query input `x`.
    :param context_dim: channel dim of `context`; defaults to `query_dim`.
    :param heads: number of attention heads.
    :param dim_head: per-head channel dim.
    :param dropout: dropout probability on the output projection.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, mask=None):
        h = self.heads

        # (b, c, n) -> (b, n, c) for attention.
        x = x.permute(0, 2, 1)
        # BUGFIX: the original called `context.permute(0, 2, 1)` before
        # resolving the self-attention default, so passing context=None
        # crashed with AttributeError instead of falling back to x.
        if context is None:
            context = x
        else:
            context = context.permute(0, 2, 1)

        q = self.to_q(x)
        k = self.to_k(context)
        v = self.to_v(context)

        # Split heads: (b, n, h*d) -> (b*h, n, d).
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        if exists(mask):
            # Broadcast the (b, ...) mask over heads and mask out with the
            # most negative representable value before softmax.
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', attn, v)
        # Merge heads back, then return to the channel-first layout.
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out).permute(0, 2, 1)
1163
+
1164
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization (32-group GroupNorm in fp32).
    """
    return GroupNorm32(32, channels)


class GroupNorm32(nn.GroupNorm):
    """GroupNorm computed in float32, with the result cast back to the
    input's dtype (keeps normalization stable under mixed precision)."""

    def forward(self, x):
        normed = super().forward(x.float())
        return normed.type(x.dtype)
1175
+
1176
class TriplaneAttentionBlock(nn.Module):
    """Per-plane cross-attention for rolled-out triplanes.

    Each plane (as queries) attends to the full rolled-out feature map
    (keys/values); the three outputs are re-concatenated along the width
    axis, added residually, and returned in stacked layout.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)

        # NOTE(review): positional args map num_heads -> CrossAttention's
        # `heads` and num_head_channels -> `dim_head`; with the default
        # num_head_channels == -1 this gives dim_head == -1 — confirm the
        # intended configuration against callers.
        self.plane1_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane2_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)
        self.plane3_ca = CrossAttention(channels, channels, self.num_heads, num_head_channels)

    def forward(self, x, temp=None):
        # NOTE(review): the checkpoint flag is hard-coded True here;
        # self.use_checkpoint is stored but never consulted.
        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        #return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        x = rollout(x)

        b, c, *spatial = x.shape
        res = x.shape[-2]
        # Split the rolled-out width (3*res) back into the three planes,
        # each flattened to (b, c, res*res).
        plane1 = x[..., :res].reshape(b, c, -1)
        plane2 = x[..., res:res*2].reshape(b, c, -1)
        plane3 = x[..., 2*res:3*res].reshape(b, c, -1)
        x = x.reshape(b, c, -1)

        # Each plane queries the whole triplane feature map.
        plane1_output = self.plane1_ca(self.norm(plane1), self.norm(x))
        plane2_output = self.plane2_ca(self.norm(plane2), self.norm(x))
        plane3_output = self.plane3_ca(self.norm(plane3), self.norm(x))

        h = torch.cat([plane1_output, plane2_output, plane3_output], -1)

        # Residual add, restore (b, c, h, 3*w), then back to stacked layout.
        x = (x + h).reshape(b, c, *spatial)

        return unrollout(x)
1224
+
1225
+
1226
class Encoder_GroupConv(nn.Module):
    """Triplane encoder built from grouped (per-plane) convolutions.

    Consumes a rolled-out triplane (B, C, H, 3*H), works internally in the
    stacked layout (B, 3*C, H, H) so convolutions can use ``groups=3``, and
    returns the rolled-out latent (channels doubled when ``double_z`` for
    mean/logvar of a DiagonalGaussianDistribution).
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False,
                 attn_type="vanilla_groupconv", mid_layers=1,
                 **ignore_kwargs):
        super().__init__()
        assert not use_linear_attn
        self.ch = ch
        self.temb_ch = 0  # no timestep embedding in the autoencoder
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling stem: one grouped 3x3 conv per plane
        self.conv_in = torch.nn.Conv2d(in_channels * 3,
                                       self.ch * 3,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1,
                                       groups=3)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock_GroupConv(in_channels=block_in,
                                                   out_channels=block_out,
                                                   temb_channels=self.temb_ch,
                                                   dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample_GroupConv(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.attn_type = attn_type
        self.mid = nn.Module()
        if attn_type == 'crossattention':
            # alternating (resblock, attention) pairs, iterated in forward
            self.mid.block_1 = nn.ModuleList()
            for _ in range(mid_layers):
                self.mid.block_1.append(
                    ResnetBlock_GroupConv(in_channels=block_in,
                                          out_channels=block_in,
                                          temb_channels=self.temb_ch,
                                          dropout=dropout)
                )
                self.mid.block_1.append(
                    make_attn(block_in, attn_type=attn_type)
                )
        else:
            self.mid.block_1 = ResnetBlock_GroupConv(in_channels=block_in,
                                                     out_channels=block_in,
                                                     temb_channels=self.temb_ch,
                                                     dropout=dropout)
            self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
            self.mid.block_2 = ResnetBlock_GroupConv(in_channels=block_in,
                                                     out_channels=block_in,
                                                     temb_channels=self.temb_ch,
                                                     dropout=dropout)

        # end
        self.norm_out = Normalize(block_in * 3, 32 * 3)
        # NOTE(review): unlike the other convs this one has no groups=3, so
        # the three planes mix in the final projection — confirm intentional.
        self.conv_out = torch.nn.Conv2d(block_in * 3,
                                        2*z_channels * 3 if double_z else z_channels * 3,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # timestep embedding (unused; kept for interface parity)
        temb = None

        # stacked layout for grouped convs
        x = unrollout(x)

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        if self.attn_type == 'crossattention':
            for m in self.mid.block_1:
                h = m(h, temb)
        else:
            h = self.mid.block_1(h, temb)
            h = self.mid.attn_1(h)
            h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)

        # back to the rolled-out layout expected by callers
        h = rollout(h)

        return h
1348
+
1349
class Decoder_GroupConv(nn.Module):
    """Triplane decoder mirroring ``Encoder_GroupConv``.

    Takes a rolled-out latent (B, Cz, H, 3*H), works internally in the
    stacked layout so convolutions can use ``groups=3``, and returns the
    decoded rolled-out triplane.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla_groupconv", mid_layers=1, **ignorekwargs):
        super().__init__()
        assert not use_linear_attn
        self.ch = ch
        self.temb_ch = 0  # no timestep embedding
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)

        # z to block_in (grouped: one filter bank per plane)
        self.conv_in = torch.nn.Conv2d(z_channels * 3,
                                       block_in * 3,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1,
                                       groups=3)

        # middle
        self.mid = nn.Module()
        self.attn_type = attn_type
        if attn_type == 'crossattention':
            # alternating (resblock, attention) pairs, iterated in forward
            self.mid.block_1 = nn.ModuleList()
            for _ in range(mid_layers):
                self.mid.block_1.append(
                    ResnetBlock_GroupConv(in_channels=block_in,
                                          out_channels=block_in,
                                          temb_channels=self.temb_ch,
                                          dropout=dropout)
                )
                self.mid.block_1.append(
                    make_attn(block_in, attn_type=attn_type)
                )
        else:
            self.mid.block_1 = ResnetBlock_GroupConv(in_channels=block_in,
                                                     out_channels=block_in,
                                                     temb_channels=self.temb_ch,
                                                     dropout=dropout)
            self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
            self.mid.block_2 = ResnetBlock_GroupConv(in_channels=block_in,
                                                     out_channels=block_in,
                                                     temb_channels=self.temb_ch,
                                                     dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock_GroupConv(in_channels=block_in,
                                                   out_channels=block_out,
                                                   temb_channels=self.temb_ch,
                                                   dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample_GroupConv(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in * 3, 32 * 3)
        self.conv_out = torch.nn.Conv2d(block_in * 3,
                                        out_ch * 3,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        groups=3)

    def forward(self, z):
        #assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # stacked layout for grouped convs
        z = unrollout(z)

        # timestep embedding (unused)
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        if self.attn_type == 'crossattention':
            for m in self.mid.block_1:
                h = m(h, temb)
        else:
            h = self.mid.block_1(h, temb)
            h = self.mid.attn_1(h)
            h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            # Return pre-activation features in stacked layout.
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)

        # back to the rolled-out layout expected by callers
        h = rollout(h)

        return h
1481
+
1482
+
1483
+
1484
# NOTE: unsuccessful experimental variants below, kept for reference.
1485
class CrossAttnFuseBlock_GroupConv(nn.Module):
    """Experimental: fuse a rolled-out triplane into ONE plane via attention.

    Three attention branches are computed, all with plane2 as the query:
    branch 0 self-attends within plane2, branch 1 attends plane2 -> plane1,
    branch 2 attends plane2 -> plane3.  The branch outputs are concatenated
    along channels and projected to a single (B, C, H, H) plane — note the
    output is NOT unrolled back to a triplane.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        # Branch 0: plane2 self-attention projections.
        self.q0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.k0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        # Branch 1: plane2 queries, plane1 keys/values.
        self.q1 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.k1 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v1 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        # Branch 2: plane2 queries, plane3 keys/values.
        self.q2 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.k2 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v2 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.proj_out0 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)
        self.proj_out1 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)
        self.proj_out2 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)

        # Collapse the three concatenated branch outputs to one plane.
        self.fuse_out = torch.nn.Conv2d(in_channels * 3,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        x = rollout(x)

        b, c, *spatial = x.shape
        res = x.shape[-2]
        # Split the rolled-out width (3*res) back into the three planes.
        plane1 = x[..., :res].reshape(b, c, res, res)
        plane2 = x[..., res:res*2].reshape(b, c, res, res)
        plane3 = x[..., 2*res:3*res].reshape(b, c, res, res)

        # NOTE(review): all three query tensors come from plane2 — plane2
        # acts as the canonical plane that gathers from the other two.
        q0 = self.q0(self.norm(plane2))
        k0 = self.k0(self.norm(plane2))
        v0 = self.v0(self.norm(plane2))

        q1 = self.q1(self.norm(plane2))
        k1 = self.k1(self.norm(plane1))
        v1 = self.v1(self.norm(plane1))

        q2 = self.q2(self.norm(plane2))
        k2 = self.k2(self.norm(plane3))
        v2 = self.v2(self.norm(plane3))

        def compute_attention(q, k, v):
            # standard scaled dot-product attention over flattened positions
            b,c,h,w = q.shape
            q = q.reshape(b,c,h*w)
            q = q.permute(0,2,1)   # b,hw,c
            k = k.reshape(b,c,h*w) # b,c,hw
            w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
            w_ = w_ * (int(c)**(-0.5))
            w_ = torch.nn.functional.softmax(w_, dim=2)
            # attend to values
            v = v.reshape(b,c,h*w)
            w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
            h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
            h_ = h_.reshape(b,c,h,w)

            return h_

        h0 = compute_attention(q0, k0, v0)
        h0 = self.proj_out0(h0)

        h1 = compute_attention(q1, k1, v1)
        h1 = self.proj_out1(h1)

        h2 = compute_attention(q2, k2, v2)
        h2 = self.proj_out2(h2)

        # Concatenate along channels and project down to a single plane.
        fuse_out = self.fuse_out(
            torch.cat([h0, h1, h2], 1)
        )

        return fuse_out
1616
+
1617
class CrossAttnDecodeBlock_GroupConv(nn.Module):
    """Experimental: expand a single fused plane back into a triplane.

    Branch 0 self-attends on the input plane; branches 1 and 2 use learned
    query maps (``q1``/``q2`` parameters of shape (1, C, h, w)) that attend
    into the input, producing the other two planes.  Results are laid side
    by side along the width axis and unrolled to the stacked layout.
    """

    def __init__(self, in_channels, h, w):
        super().__init__()
        self.in_channels = in_channels
        self.h = h
        self.w = w

        self.norm = Normalize(in_channels)
        # Branch 0: self-attention on the input plane.
        self.q0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.k0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v0 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)

        # Learned query map for the first decoded plane.
        self.q1 = torch.nn.Parameter(torch.randn(1, self.in_channels, h, w))
        self.q1.requires_grad = True

        self.k1 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v1 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)

        # Learned query map for the third decoded plane.
        self.q2 = torch.nn.Parameter(torch.randn(1, self.in_channels, h, w))
        self.q2.requires_grad = True

        self.k2 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.v2 = torch.nn.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
        self.proj_out0 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)
        self.proj_out1 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)
        self.proj_out2 = torch.nn.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)

        # 1x1 conv applied to the width-wise concatenation of the planes.
        self.fuse_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        # Input is a single plane (B, C, h, w); no rollout needed.
        b, c, *spatial = x.shape
        res = x.shape[-2]

        q0 = self.q0(self.norm(x))
        k0 = self.k0(self.norm(x))
        v0 = self.v0(self.norm(x))

        # Learned queries broadcast over the batch.
        q1 = self.q1.repeat(b, 1, 1, 1)
        k1 = self.k1(self.norm(x))
        v1 = self.v1(self.norm(x))

        q2 = self.q2.repeat(b, 1, 1, 1)
        k2 = self.k2(self.norm(x))
        v2 = self.v2(self.norm(x))

        def compute_attention(q, k, v):
            # standard scaled dot-product attention over flattened positions
            b,c,h,w = q.shape
            q = q.reshape(b,c,h*w)
            q = q.permute(0,2,1)   # b,hw,c
            k = k.reshape(b,c,h*w) # b,c,hw
            w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
            w_ = w_ * (int(c)**(-0.5))
            w_ = torch.nn.functional.softmax(w_, dim=2)
            # attend to values
            v = v.reshape(b,c,h*w)
            w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
            h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
            h_ = h_.reshape(b,c,h,w)
            return h_

        h0 = compute_attention(q0, k0, v0)
        h0 = self.proj_out0(h0)

        h1 = compute_attention(q1, k1, v1)
        h1 = self.proj_out1(h1)

        h2 = compute_attention(q2, k2, v2)
        h2 = self.proj_out2(h2)

        # Planes laid side by side along width (order: h1, h0, h2), then a
        # shared 1x1 conv; unrollout converts to the stacked triplane layout.
        fuse_out = self.fuse_out(
            torch.cat([h1, h0, h2], -1)
        )

        fuse_out = unrollout(fuse_out)

        return fuse_out
1749
+
1750
class Encoder_GroupConv_LateFusion(nn.Module):
    """Triplane encoder that fuses the three planes into ONE latent plane.

    Same grouped-conv downsampling path as ``Encoder_GroupConv``, but after
    the middle blocks a cross-attention fusion collapses the three planes to
    a single feature map, so the output latent is a single plane rather than
    a rolled-out triplane.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla_groupconv",
                 **ignore_kwargs):
        super().__init__()
        assert not use_linear_attn
        self.ch = ch
        self.temb_ch = 0  # no timestep embedding
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling stem: one grouped 3x3 conv per plane
        self.conv_in = torch.nn.Conv2d(in_channels * 3,
                                       self.ch * 3,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1,
                                       groups=3)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock_GroupConv(in_channels=block_in,
                                                   out_channels=block_out,
                                                   temb_channels=self.temb_ch,
                                                   dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample_GroupConv(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock_GroupConv(in_channels=block_in,
                                                 out_channels=block_in,
                                                 temb_channels=self.temb_ch,
                                                 dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock_GroupConv(in_channels=block_in,
                                                 out_channels=block_in,
                                                 temb_channels=self.temb_ch,
                                                 dropout=dropout)

        # fuse to one plane
        self.fuse = CrossAttnFuseBlock_GroupConv(block_in)

        # end — single plane: no *3 channel factor and no grouped conv
        self.norm_out = Normalize(block_in, 32)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # timestep embedding (unused)
        temb = None

        # stacked layout for grouped convs
        x = unrollout(x)

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # collapse the three planes into one
        h = self.fuse(h)

        # end — output stays a single plane (no rollout)
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)

        return h
1853
+
1854
class Decoder_GroupConv_LateFusion(nn.Module):
    """Triplane decoder with late fusion.

    The fused latent is first expanded into three planes by a cross-attention
    decode block, then refined by a shared group-conv ResNet/attention trunk,
    upsampled back to `resolution`, and finally projected per-plane by a
    grouped convolution. Output is re-rolled into triplane layout.
    """

    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla_groupconv", **ignorekwargs):
        super().__init__()
        assert not use_linear_attn  # linear attention is not supported by this decoder
        self.ch = ch
        self.temb_ch = 0  # no timestep conditioning
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        # NOTE(review): in_ch_mult is computed but never used in this class.
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        # print("Working with z of shape {} = {} dimensions.".format(
        #     self.z_shape, np.prod(self.z_shape)))

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        # triplane decoder: expands the single fused plane into three planes
        self.triplane_decoder = CrossAttnDecodeBlock_GroupConv(block_in, curr_res, curr_res)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock_GroupConv(in_channels=block_in,
                                                 out_channels=block_in,
                                                 temb_channels=self.temb_ch,
                                                 dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock_GroupConv(in_channels=block_in,
                                                 out_channels=block_in,
                                                 temb_channels=self.temb_ch,
                                                 dropout=dropout)

        # upsampling (built from the lowest resolution upward)
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock_GroupConv(in_channels=block_in,
                                                   out_channels=block_out,
                                                   temb_channels=self.temb_ch,
                                                   dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample_GroupConv(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end: groups=3 projects each of the three planes independently
        self.norm_out = Normalize(block_in * 3, 32 * 3)
        self.conv_out = torch.nn.Conv2d(block_in * 3,
                                        out_ch * 3,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        groups=3)

    def forward(self, z):
        """Decode latent `z` into a triplane tensor (rolled-out layout)."""
        #assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding (unused; kept for block signature compatibility)
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # expand fused latent into three planes
        h = self.triplane_decoder(h)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)

        h = rollout(h)

        return h
1970
+
1971
+
1972
+ # VIT Encoder and Decoder from https://github.com/thuanz123/enhancing-transformers/blob/main/enhancing/modules/stage1/layers.py
1973
+ # ------------------------------------------------------------------------------------
1974
+ # Enhancing Transformers
1975
+ # Copyright (c) 2022 Thuan H. Nguyen. All Rights Reserved.
1976
+ # Licensed under the MIT License [see LICENSE for details]
1977
+ # ------------------------------------------------------------------------------------
1978
+ # Modified from ViT-Pytorch (https://github.com/lucidrains/vit-pytorch)
1979
+ # Copyright (c) 2020 Phil Wang. All Rights Reserved.
1980
+ # ------------------------------------------------------------------------------------
1981
+
1982
+ import math
1983
+ import numpy as np
1984
+ from typing import Union, Tuple, List
1985
+ from collections import OrderedDict
1986
+
1987
+ import torch
1988
+ import torch.nn as nn
1989
+ import torch.nn.functional as F
1990
+ from einops import rearrange, repeat
1991
+ from einops.layers.torch import Rearrange
1992
+
1993
def get_2d_sincos_pos_embed(embed_dim, grid_size):
    """
    Build a fixed 2D sin-cos positional embedding.

    Args:
        embed_dim: embedding dimension per position (must be even).
        grid_size: int or (height, width) tuple of the grid.

    Returns:
        pos_embed: np.ndarray of shape [grid_height * grid_width, embed_dim].
        (No cls token row is prepended.)
    """
    # isinstance handles tuple subclasses; `type(x) != tuple` does not
    if not isinstance(grid_size, tuple):
        grid_size = (grid_size, grid_size)
    grid_h = np.arange(grid_size[0], dtype=np.float32)
    grid_w = np.arange(grid_size[1], dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size[0], grid_size[1]])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)

    return pos_embed
2009
+
2010
+
2011
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Combine per-axis 1D sin-cos embeddings into one 2D embedding.

    The first half of the channels encodes the h coordinate, the second
    half the w coordinate. Returns an array of shape (H*W, embed_dim).
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2

    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)

    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
2020
+
2021
+
2022
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    Sinusoidal embedding of scalar positions.

    embed_dim: output dimension for each position (must be even)
    pos: array of positions to be encoded, any shape with M elements
    returns: (M, embed_dim) array — first half sin, second half cos
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2

    # frequencies 1/10000^(2i/D), as in the original transformer paper
    freqs = np.arange(half, dtype=np.float32)
    freqs /= embed_dim / 2.
    freqs = 1. / 10000**freqs  # (D/2,)

    # outer product of positions and frequencies
    angles = np.einsum('m,d->md', pos.reshape(-1), freqs)  # (M, D/2)

    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
2041
+
2042
+
2043
def init_weights(m):
    """Xavier-uniform initialization, following the official JAX ViT scheme.

    Linear: xavier weight, zero bias. LayerNorm: unit weight, zero bias.
    Conv2d/ConvTranspose2d: xavier on the weight flattened to 2D.
    """
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.LayerNorm):
        nn.init.zeros_(m.bias)
        nn.init.ones_(m.weight)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # flatten so fan-in/fan-out match the linear-layer convention;
        # the view shares storage, so this initializes the conv weight in place
        flat = m.weight.data.view([m.weight.data.shape[0], -1])
        nn.init.xavier_uniform_(flat)
2055
+
2056
+
2057
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before a wrapped module (pre-norm block)."""

    def __init__(self, dim: int, fn: nn.Module) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
        normalized = self.norm(x)
        return self.fn(normalized, **kwargs)
2065
+
2066
+
2067
class FeedForward(nn.Module):
    """Two-layer MLP with a Tanh non-linearity, used inside transformer blocks."""

    def __init__(self, dim: int, hidden_dim: int) -> None:
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        return self.net(x)
2078
+
2079
+
2080
class Attention(nn.Module):
    """Standard multi-head scaled dot-product self-attention."""

    def __init__(self, dim: int, heads: int = 8, dim_head: int = 64) -> None:
        super().__init__()
        inner_dim = dim_head * heads
        # skip the output projection when a single head already matches dim
        needs_projection = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim=-1)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        self.to_out = nn.Linear(inner_dim, dim) if needs_projection else nn.Identity()

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        batch, seq, _ = x.shape
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)

        # split heads: (b, n, h*d) -> (b, h, n, d)
        def split_heads(t):
            return t.view(batch, seq, self.heads, -1).transpose(1, 2)

        q, k, v = split_heads(q), split_heads(k), split_heads(v)

        scores = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        weights = self.attend(scores)

        out = torch.matmul(weights, v)
        # merge heads back: (b, h, n, d) -> (b, n, h*d)
        out = out.transpose(1, 2).reshape(batch, seq, -1)

        return self.to_out(out)
2105
+
2106
+
2107
class Transformer(nn.Module):
    """Stack of pre-norm attention + feed-forward blocks with residual adds."""

    def __init__(self, dim: int, depth: int, heads: int, dim_head: int, mlp_dim: int) -> None:
        super().__init__()
        self.layers = nn.ModuleList(
            nn.ModuleList([
                PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head)),
                PreNorm(dim, FeedForward(dim, mlp_dim)),
            ])
            for _ in range(depth)
        )
        self.norm = nn.LayerNorm(dim)

    def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
        for attention, feed_forward in self.layers:
            x = attention(x) + x
            x = feed_forward(x) + x

        # final LayerNorm on the residual stream
        return self.norm(x)
2123
+
2124
+
2125
class ViTEncoder(nn.Module):
    """ViT-style encoder: conv patch embedding + fixed sin-cos positions + transformer.

    Input is an image tensor (B, channels, H, W); output is a feature map
    (B, dim, H // patch_height, W // patch_width).
    """
    def __init__(self, image_size: Union[Tuple[int, int], int], patch_size: Union[Tuple[int, int], int],
                 dim: int, depth: int, heads: int, mlp_dim: int, channels: int = 3, dim_head: int = 64) -> None:
        super().__init__()
        image_height, image_width = image_size if isinstance(image_size, tuple) \
            else (image_size, image_size)
        patch_height, patch_width = patch_size if isinstance(patch_size, tuple) \
            else (patch_size, patch_size)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
        en_pos_embedding = get_2d_sincos_pos_embed(dim, (image_height // patch_height, image_width // patch_width))

        # NOTE(review): num_patches and patch_dim are stored but unused in forward.
        self.num_patches = (image_height // patch_height) * (image_width // patch_width)
        self.patch_dim = channels * patch_height * patch_width

        # patchify with a strided conv, then flatten the spatial grid to tokens
        self.to_patch_embedding = nn.Sequential(
            nn.Conv2d(channels, dim, kernel_size=patch_size, stride=patch_size),
            Rearrange('b c h w -> b (h w) c'),
        )

        self.patch_height = patch_height
        self.patch_width = patch_width
        self.image_height = image_height
        self.image_width = image_width
        self.dim = dim

        # fixed (non-trainable) sin-cos positional embedding, shape (1, N, dim)
        self.en_pos_embedding = nn.Parameter(torch.from_numpy(en_pos_embedding).float().unsqueeze(0), requires_grad=False)
        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim)

        self.apply(init_weights)

    def forward(self, img: torch.FloatTensor) -> torch.FloatTensor:
        """Encode an image into a (B, dim, H/p, W/p) feature map."""
        x = self.to_patch_embedding(img)
        x = x + self.en_pos_embedding
        x = self.transformer(x)

        # restore the (h, w) token grid and move channels first
        x = Rearrange('b h w c -> b c h w')(x.reshape(-1, self.image_height // self.patch_height, self.image_width // self.patch_width, self.dim))

        return x
2164
+
2165
+
2166
class ViTDecoder(nn.Module):
    """ViT-style decoder: fixed sin-cos positions + transformer + transposed-conv unpatchify.

    Input is a token feature map (B, dim, h, w); output is an image tensor
    (B, channels, h * patch_height, w * patch_width).
    """
    def __init__(self, image_size: Union[Tuple[int, int], int], patch_size: Union[Tuple[int, int], int],
                 dim: int, depth: int, heads: int, mlp_dim: int, channels: int = 3, dim_head: int = 64) -> None:
        super().__init__()
        image_height, image_width = image_size if isinstance(image_size, tuple) \
            else (image_size, image_size)
        patch_height, patch_width = patch_size if isinstance(patch_size, tuple) \
            else (patch_size, patch_size)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
        de_pos_embedding = get_2d_sincos_pos_embed(dim, (image_height // patch_height, image_width // patch_width))

        # NOTE(review): num_patches and patch_dim are stored but unused in forward.
        self.num_patches = (image_height // patch_height) * (image_width // patch_width)
        self.patch_dim = channels * patch_height * patch_width

        self.patch_height = patch_height
        self.patch_width = patch_width
        self.image_height = image_height
        self.image_width = image_width
        self.dim = dim

        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim)
        # fixed (non-trainable) sin-cos positional embedding, shape (1, N, dim)
        self.de_pos_embedding = nn.Parameter(torch.from_numpy(de_pos_embedding).float().unsqueeze(0), requires_grad=False)
        # tokens back to pixels: restore the grid, then a transposed conv upsamples
        self.to_pixel = nn.Sequential(
            Rearrange('b (h w) c -> b c h w', h=image_height // patch_height),
            nn.ConvTranspose2d(dim, channels, kernel_size=patch_size, stride=patch_size)
        )

        self.apply(init_weights)

    def forward(self, token: torch.FloatTensor) -> torch.FloatTensor:
        """Decode a (B, dim, h, w) token map into an image tensor."""
        token = Rearrange('b c h w -> b (h w) c')(token)

        x = token + self.de_pos_embedding
        x = self.transformer(x)
        x = self.to_pixel(x)

        return x

    def get_last_layer(self) -> nn.Parameter:
        """Weight of the final transposed conv (used e.g. for adaptive loss weighting)."""
        return self.to_pixel[-1].weight