yeungchenwa committed on
Commit
508b842
1 Parent(s): 43b3c60

[Update] Add files and checkpoint

Files changed (48)
  1. .gitattributes +2 -0
  2. .gitignore +180 -0
  3. app.py +147 -0
  4. ckpt/content_encoder.pth +3 -0
  5. ckpt/style_encoder.pth +3 -0
  6. ckpt/unet.pth +3 -0
  7. configs/fontdiffuser.py +87 -0
  8. dataset/font_dataset.py +69 -0
  9. figures/ref_imgs/ref_/345/243/244.jpg +0 -0
  10. figures/ref_imgs/ref_/345/252/232.jpg +0 -0
  11. figures/ref_imgs/ref_/346/252/200.jpg +0 -0
  12. figures/ref_imgs/ref_/346/254/237.jpg +0 -0
  13. figures/ref_imgs/ref_/347/251/227.jpg +0 -0
  14. figures/ref_imgs/ref_/347/261/215.jpg +0 -0
  15. figures/ref_imgs/ref_/347/261/215_1.jpg +0 -0
  16. figures/ref_imgs/ref_/350/234/223.jpg +0 -0
  17. figures/ref_imgs/ref_/350/261/204.jpg +0 -0
  18. figures/ref_imgs/ref_/351/227/241.jpg +0 -0
  19. figures/ref_imgs/ref_/351/233/225.jpg +0 -0
  20. figures/ref_imgs/ref_/351/236/243.jpg +0 -0
  21. figures/ref_imgs/ref_/351/246/250.jpg +0 -0
  22. figures/ref_imgs/ref_/351/262/270.jpg +0 -0
  23. figures/ref_imgs/ref_/351/267/242.jpg +0 -0
  24. figures/ref_imgs/ref_/351/271/260.jpg +0 -0
  25. figures/source_imgs/source_/347/201/250.jpg +0 -0
  26. figures/source_imgs/source_/351/207/205.jpg +0 -0
  27. figures/source_imgs/source_/351/221/253.jpg +0 -0
  28. figures/source_imgs/source_/351/221/273.jpg +0 -0
  29. requirements.txt +5 -0
  30. sample.py +252 -0
  31. src/.DS_Store +0 -0
  32. src/__init__.py +11 -0
  33. src/build.py +64 -0
  34. src/criterion.py +44 -0
  35. src/dpm_solver/dpm_solver_pytorch.py +1332 -0
  36. src/dpm_solver/pipeline_dpm_solver.py +117 -0
  37. src/model.py +110 -0
  38. src/modules/__init__.py +3 -0
  39. src/modules/attention.py +414 -0
  40. src/modules/content_encoder.py +435 -0
  41. src/modules/embeddings.py +84 -0
  42. src/modules/resnet.py +353 -0
  43. src/modules/style_encoder.py +442 -0
  44. src/modules/unet.py +299 -0
  45. src/modules/unet_blocks.py +661 -0
  46. ttf/KaiXinSongA.ttf +3 -0
  47. ttf/KaiXinSongB.ttf +3 -0
  48. utils.py +123 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ ttf/KaiXinSongA.ttf filter=lfs diff=lfs merge=lfs -text
37
+ ttf/KaiXinSongB.ttf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,180 @@
1
+ # Initially taken from GitHub's Python gitignore file
2
+ outputs/
3
+ run_sh/
4
+
5
+ # Byte-compiled / optimized / DLL files
6
+ __pycache__/
7
+ *.py[cod]
8
+ *$py.class
9
+
10
+ # C extensions
11
+ *.so
12
+
13
+ # tests and logs
14
+ tests/fixtures/cached_*_text.txt
15
+ logs/
16
+ lightning_logs/
17
+ lang_code_data/
18
+
19
+ # Distribution / packaging
20
+ .Python
21
+ build/
22
+ develop-eggs/
23
+ dist/
24
+ downloads/
25
+ eggs/
26
+ .eggs/
27
+ lib/
28
+ lib64/
29
+ parts/
30
+ sdist/
31
+ var/
32
+ wheels/
33
+ *.egg-info/
34
+ .installed.cfg
35
+ *.egg
36
+ MANIFEST
37
+
38
+ # PyInstaller
39
+ # Usually these files are written by a Python script from a template
40
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
41
+ *.manifest
42
+ *.spec
43
+
44
+ # Installer logs
45
+ pip-log.txt
46
+ pip-delete-this-directory.txt
47
+
48
+ # Unit test / coverage reports
49
+ htmlcov/
50
+ .tox/
51
+ .nox/
52
+ .coverage
53
+ .coverage.*
54
+ .cache
55
+ nosetests.xml
56
+ coverage.xml
57
+ *.cover
58
+ .hypothesis/
59
+ .pytest_cache/
60
+
61
+ # Translations
62
+ *.mo
63
+ *.pot
64
+
65
+ # Django stuff:
66
+ *.log
67
+ local_settings.py
68
+ db.sqlite3
69
+
70
+ # Flask stuff:
71
+ instance/
72
+ .webassets-cache
73
+
74
+ # Scrapy stuff:
75
+ .scrapy
76
+
77
+ # Sphinx documentation
78
+ docs/_build/
79
+
80
+ # PyBuilder
81
+ target/
82
+
83
+ # Jupyter Notebook
84
+ .ipynb_checkpoints
85
+
86
+ # IPython
87
+ profile_default/
88
+ ipython_config.py
89
+
90
+ # pyenv
91
+ .python-version
92
+
93
+ # celery beat schedule file
94
+ celerybeat-schedule
95
+
96
+ # SageMath parsed files
97
+ *.sage.py
98
+
99
+ # Environments
100
+ .env
101
+ .venv
102
+ env/
103
+ venv/
104
+ ENV/
105
+ env.bak/
106
+ venv.bak/
107
+
108
+ # Spyder project settings
109
+ .spyderproject
110
+ .spyproject
111
+
112
+ # Rope project settings
113
+ .ropeproject
114
+
115
+ # mkdocs documentation
116
+ /site
117
+
118
+ # mypy
119
+ .mypy_cache/
120
+ .dmypy.json
121
+ dmypy.json
122
+
123
+ # Pyre type checker
124
+ .pyre/
125
+
126
+ # vscode
127
+ .vs
128
+ .vscode
129
+
130
+ # Pycharm
131
+ .idea
132
+
133
+ # TF code
134
+ tensorflow_code
135
+
136
+ # Models
137
+ proc_data
138
+
139
+ # examples
140
+ runs
141
+ /runs_old
142
+ /wandb
143
+ /examples/runs
144
+ /examples/**/*.args
145
+ /examples/rag/sweep
146
+
147
+ # data
148
+ /data
149
+ serialization_dir
150
+
151
+ # emacs
152
+ *.*~
153
+ debug.env
154
+
155
+ # vim
156
+ .*.swp
157
+
158
+ # ctags
159
+ tags
160
+
161
+ # pre-commit
162
+ .pre-commit*
163
+
164
+ # .lock
165
+ *.lock
166
+
167
+ # DS_Store (MacOS)
168
+ .DS_Store
169
+
170
+ # RL pipelines may produce mp4 outputs
171
+ *.mp4
172
+
173
+ # dependencies
174
+ /transformers
175
+
176
+ # ruff
177
+ .ruff_cache
178
+
179
+ # wandb
180
+ wandb
app.py ADDED
@@ -0,0 +1,147 @@
1
+ import random
2
+ import gradio as gr
3
+ from sample import (arg_parse,
4
+ sampling,
5
+ load_fontdiffuer_pipeline)
6
+
7
+
8
+ def run_fontdiffuer(source_image,
9
+ character,
10
+ reference_image,
11
+ sampling_step,
12
+ guidance_scale,
13
+ batch_size):
14
+ args.character_input = source_image is None
15
+ args.content_character = character
16
+ args.sampling_step = sampling_step
17
+ args.guidance_scale = guidance_scale
18
+ args.batch_size = batch_size
19
+ args.seed = random.randint(0, 10000)
20
+ out_image = sampling(
21
+ args=args,
22
+ pipe=pipe,
23
+ content_image=source_image,
24
+ style_image=reference_image)
25
+ return out_image
26
+
27
+
28
+ if __name__ == '__main__':
29
+ args = arg_parse()
30
+ args.demo = True
31
+ args.ckpt_dir = 'ckpt'
32
+ args.ttf_path = 'ttf/KaiXinSongA.ttf'
33
+
34
+ # load fontdiffuer pipeline
35
+ pipe = load_fontdiffuer_pipeline(args=args)
36
+
37
+ with gr.Blocks() as demo:
38
+ with gr.Row():
39
+ with gr.Column(scale=1):
40
+ gr.HTML("""
41
+ <div style="text-align: center; max-width: 1200px; margin: 20px auto;">
42
+ <h1 style="font-weight: 900; font-size: 3rem; margin: 0rem">
43
+ FontDiffuser
44
+ </h1>
45
+ <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
46
+ <a href="https://yeungchenwa.github.io/">Zhenhua Yang</a>,
47
+ <a href="https://scholar.google.com/citations?user=6zNgcjAAAAAJ&hl=zh-CN&oi=ao">Dezhi Peng</a>,
48
+ Yuxin Kong, Yuyi Zhang,
49
+ <a href="https://scholar.google.com/citations?user=IpmnLFcAAAAJ&hl=zh-CN&oi=ao">Cong Yao</a>,
50
+ <a href="http://www.dlvc-lab.net/lianwen/Index.html">Lianwen Jin</a>†
51
+ </h2>
52
+ <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
53
+ <strong>South China University of Technology</strong>, Alibaba DAMO Academy
54
+ </h2>
55
+ <h3 style="font-weight: 450; font-size: 1rem; margin: 0rem">
56
+ [<a href="https://github.com/yeungchenwa/FontDiffuser" style="color:blue;">arXiv</a>]
57
+ [<a href="https://github.com/yeungchenwa/FontDiffuser" style="color:green;">Github</a>]
58
+ </h3>
59
+ <h2 style="text-align: left; font-weight: 600; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
60
+ 1. We propose FontDiffuser, which is capable of generating unseen characters and styles and can be extended to cross-lingual generation, such as Chinese to Korean.
61
+ </h2>
62
+ <h2 style="text-align: left; font-weight: 600; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
63
+ 2. FontDiffuser excels at generating complex characters and handling large style variations, and it achieves state-of-the-art performance.
64
+ </h2>
65
+ </div>
66
+ """)
67
+ gr.Image('figures/result_vis.png')
68
+ gr.Image('figures/demo_tips.png')
69
+ with gr.Column(scale=1):
70
+ with gr.Row():
71
+ source_image = gr.Image(width=320, label='[Option 1] Source Image', image_mode='RGB', type='pil')
72
+ reference_image = gr.Image(width=320, label='Reference Image', image_mode='RGB', type='pil')
73
+ with gr.Row():
74
+ character = gr.Textbox(value='隆', label='[Option 2] Source Character')
75
+ with gr.Row():
76
+ fontdiffuer_output_image = gr.Image(height=200, label="FontDiffuser Output Image", image_mode='RGB', type='pil')
77
+
78
+ sampling_step = gr.Slider(20, 50, value=20, step=10,
79
+ label="Sampling Step", info="The sampling step by FontDiffuser.")
80
+ guidance_scale = gr.Slider(1, 12, value=7.5, step=0.5,
81
+ label="Scale of Classifier-free Guidance",
82
+ info="The scale used for classifier-free guidance sampling")
83
+ batch_size = gr.Slider(1, 4, value=1, step=1,
84
+ label="Batch Size", info="The number of images to be sampled.")
85
+
86
+ FontDiffuser = gr.Button('Run FontDiffuser')
87
+ gr.Markdown("## <font color=#008000 size=6>Examples that You Can Choose Below⬇️</font>")
88
+ with gr.Row():
89
+ gr.Markdown("## Examples")
90
+ with gr.Row():
91
+ with gr.Column(scale=1):
92
+ gr.Markdown("## Example 1️⃣: Source Image and Reference Image")
93
+ gr.Markdown("### In this mode, we provide both the source image and \
94
+ the reference image for you to try our demo!")
95
+ gr.Examples(
96
+ examples=[['figures/source_imgs/source_灨.jpg', 'figures/ref_imgs/ref_籍.jpg'],
97
+ ['figures/source_imgs/source_鑻.jpg', 'figures/ref_imgs/ref_鹰.jpg'],
98
+ ['figures/source_imgs/source_鑫.jpg', 'figures/ref_imgs/ref_壤.jpg'],
99
+ ['figures/source_imgs/source_釅.jpg', 'figures/ref_imgs/ref_雕.jpg']],
100
+ inputs=[source_image, reference_image]
101
+ )
102
+ with gr.Column(scale=1):
103
+ gr.Markdown("## Example 2️⃣: Character and Reference Image")
104
+ gr.Markdown("### In this mode, we provide the content character and the reference image \
105
+ for you to try our demo!")
106
+ gr.Examples(
107
+ examples=[['龍', 'figures/ref_imgs/ref_鷢.jpg'],
108
+ ['轉', 'figures/ref_imgs/ref_鲸.jpg'],
109
+ ['懭', 'figures/ref_imgs/ref_籍_1.jpg'],
110
+ ['識', 'figures/ref_imgs/ref_鞣.jpg']],
111
+ inputs=[character, reference_image]
112
+ )
113
+ with gr.Column(scale=1):
114
+ gr.Markdown("## Example 3️⃣: Reference Image")
115
+ gr.Markdown("### In this mode, we provide only the reference image; \
116
+ you can upload your own source image or enter a character above \
117
+ to try our demo!")
118
+ gr.Examples(
119
+ examples=['figures/ref_imgs/ref_闡.jpg',
120
+ 'figures/ref_imgs/ref_雕.jpg',
121
+ 'figures/ref_imgs/ref_豄.jpg',
122
+ 'figures/ref_imgs/ref_馨.jpg',
123
+ 'figures/ref_imgs/ref_鲸.jpg',
124
+ 'figures/ref_imgs/ref_檀.jpg',
125
+ 'figures/ref_imgs/ref_鞣.jpg',
126
+ 'figures/ref_imgs/ref_穗.jpg',
127
+ 'figures/ref_imgs/ref_欟.jpg',
128
+ 'figures/ref_imgs/ref_籍_1.jpg',
129
+ 'figures/ref_imgs/ref_鷢.jpg',
130
+ 'figures/ref_imgs/ref_媚.jpg',
131
+ 'figures/ref_imgs/ref_籍.jpg',
132
+ 'figures/ref_imgs/ref_壤.jpg',
133
+ 'figures/ref_imgs/ref_蜓.jpg',
134
+ 'figures/ref_imgs/ref_鹰.jpg'],
135
+ examples_per_page=20,
136
+ inputs=reference_image
137
+ )
138
+ FontDiffuser.click(
139
+ fn=run_fontdiffuer,
140
+ inputs=[source_image,
141
+ character,
142
+ reference_image,
143
+ sampling_step,
144
+ guidance_scale,
145
+ batch_size],
146
+ outputs=fontdiffuer_output_image)
147
+ demo.launch(debug=True)
ckpt/content_encoder.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5b52582473579031bd0f935abbb9a3e5cb3727dccc25e75f77d1f41d3cbb3ff
3
+ size 4765643
ckpt/style_encoder.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82eb56abc37ebf7e662d1141a45d8a54ad4bc0ee8aa749c4bb7bc7bddb6cca46
3
+ size 82410027
ckpt/unet.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bde1920ac8d843edbfffa6e6befedc5da39f753b927ce272cfc85cf99dcbfdb
3
+ size 315147685
configs/fontdiffuser.py ADDED
@@ -0,0 +1,87 @@
1
+ import os
2
+ import argparse
3
+
4
+ def get_parser():
5
+ parser = argparse.ArgumentParser(description="Training config for FontDiffuser.")
6
+ ################# Experience #################
7
+ parser.add_argument("--seed", type=int, default=123, help="A seed for reproducible training.")
8
+ parser.add_argument("--experience_name", type=str, default="fontdiffuer_training")
9
+ parser.add_argument("--data_root", type=str, default=None,
10
+ help="The font dataset root path.",)
11
+ parser.add_argument("--output_dir", type=str, default=None,
12
+ help="The output directory where the model predictions and checkpoints will be written.")
13
+ parser.add_argument("--report_to", type=str, default="tensorboard")
14
+ parser.add_argument("--logging_dir", type=str, default="logs",
15
+ help=("[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
16
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."))
17
+
18
+ # Model
19
+ parser.add_argument("--resolution", type=int, default=96,
20
+ help="The resolution for input images; all images in the train/validation \
21
+ dataset will be resized to this.")
22
+ parser.add_argument("--unet_channels", type=tuple, default=(64, 128, 256, 512),
23
+ help="The channels of the UNet.")
24
+ parser.add_argument("--style_image_size", type=int, default=96, help="The size of style images.")
25
+ parser.add_argument("--content_image_size", type=int, default=96, help="The size of content images.")
26
+ parser.add_argument("--content_encoder_downsample_size", type=int, default=3,
27
+ help="The downsample size of the content encoder.")
28
+ parser.add_argument("--channel_attn", type=bool, default=True, help="Whether to use SE (squeeze-and-excitation) channel attention.",)
29
+ parser.add_argument("--content_start_channel", type=int, default=64,
30
+ help="The number of channels of the first-layer output of the content encoder.",)
31
+ parser.add_argument("--style_start_channel", type=int, default=64,
32
+ help="The number of channels of the first-layer output of the style encoder.",)
33
+
34
+ # Training
35
+ parser.add_argument("--train_batch_size", type=int, default=4,
36
+ help="Batch size (per device) for the training dataloader.")
37
+ ## loss coefficient
38
+ parser.add_argument("--perceptual_coefficient", type=float, default=0.01)
39
+ parser.add_argument("--offset_coefficient", type=float, default=0.5)
40
+ ## step
41
+ parser.add_argument("--max_train_steps", type=int, default=440000,
42
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",)
43
+ parser.add_argument("--ckpt_interval", type=int, default=40000, help="The interval (in steps) for saving checkpoints.")
44
+ parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
45
+ help="Number of update steps to accumulate before performing a backward/update pass.",)
46
+ parser.add_argument("--log_interval", type=int, default=100, help="The log interval of training.")
47
+ ## learning rate
48
+ parser.add_argument("--learning_rate", type=float, default=1e-4,
49
+ help="Initial learning rate (after the potential warmup period) to use.")
50
+ parser.add_argument("--scale_lr", action="store_true", default=False,
51
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.")
52
+ parser.add_argument("--lr_scheduler", type=str, default="linear",
53
+ help="The scheduler type to use. Choose between 'linear', 'cosine', \
54
+ 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'")
55
+ parser.add_argument("--lr_warmup_steps", type=int, default=10000,
56
+ help="Number of steps for the warmup in the lr scheduler.")
57
+ ## classifier-free
58
+ parser.add_argument("--drop_prob", type=float, default=0.1, help="The probability of dropping the condition during training, for classifier-free guidance.")
59
+ ## scheduler
60
+ parser.add_argument("--beta_scheduler", type=str, default="scaled_linear", help="The beta scheduler for DDPM.")
61
+ ## optimizer
62
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
63
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
64
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
65
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
66
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
67
+
68
+ parser.add_argument("--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"],
69
+ help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires \
70
+ PyTorch >= 1.10 and an Nvidia Ampere GPU.")
71
+
72
+ # Sampling
73
+ parser.add_argument("--algorithm_type", type=str, default="dpmsolver++", help="Algorithm type for sampling.")
74
+ parser.add_argument("--guidance_type", type=str, default="classifier-free", help="Guidance type of sampling.")
75
+ parser.add_argument("--guidance_scale", type=float, default=7.5, help="Guidance scale of the classifier-free mode.")
76
+ parser.add_argument("--num_inference_steps", type=int, default=20, help="Sampling step.")
77
+ parser.add_argument("--model_type", type=str, default="noise", help="model_type for sampling.")
78
+ parser.add_argument("--order", type=int, default=2, help="The order of the dpmsolver.")
79
+ parser.add_argument("--skip_type", type=str, default="time_uniform", help="Skip type of dpmsolver.")
80
+ parser.add_argument("--method", type=str, default="multistep", help="The sampling method of dpmsolver.")
81
+ parser.add_argument("--correcting_x0_fn", type=str, default=None, help="correcting_x0_fn of dpmsolver.")
82
+ parser.add_argument("--t_start", type=str, default=None, help="t_start of dpmsolver.")
83
+ parser.add_argument("--t_end", type=str, default=None, help="t_end of dpmsolver.")
84
+
85
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
86
+
87
+ return parser
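A minimal sketch of how this shared parser is meant to be consumed (the `--my_flag` argument and the example values below are illustrative, not part of this commit): downstream scripts such as `sample.py` call `get_parser()`, extend it with their own flags, and only then parse.

```
# Sketch: reuse the shared FontDiffuser parser and extend it per script.
from configs.fontdiffuser import get_parser

parser = get_parser()
parser.add_argument("--my_flag", action="store_true",
                    help="Hypothetical script-specific flag.")
args = parser.parse_args(["--resolution", "96", "--guidance_scale", "7.5"])
print(args.seed, args.unet_channels, args.num_inference_steps)
```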
dataset/font_dataset.py ADDED
@@ -0,0 +1,69 @@
1
+ import os
2
+ import random
3
+ from PIL import Image
4
+
5
+ from torch.utils.data import Dataset
6
+ import torchvision.transforms as transforms
7
+
8
+ def get_nonorm_transform(resolution):
9
+ nonorm_transform = transforms.Compose(
10
+ [transforms.Resize((resolution, resolution),
11
+ interpolation=transforms.InterpolationMode.BILINEAR),
12
+ transforms.ToTensor()])
13
+ return nonorm_transform
14
+
15
+
16
+ class FontDataset(Dataset):
17
+ """The dataset of font generation
18
+ """
19
+ def __init__(self, args, phase, transforms=None):
20
+ super().__init__()
21
+ self.root = args.data_root
22
+ self.phase = phase
23
+
24
+ # Get Data path
25
+ self.get_path()
26
+ self.transforms = transforms
27
+ self.nonorm_transforms = get_nonorm_transform(args.resolution)
28
+
29
+ def get_path(self):
30
+ self.target_images = []
31
+ # images with related style
32
+ self.style_to_images = {}
33
+ target_image_dir = f"{self.root}/{self.phase}/TargetImage"
34
+ for style in os.listdir(target_image_dir):
35
+ images_related_style = []
36
+ for img in os.listdir(f"{target_image_dir}/{style}"):
37
+ img_path = f"{target_image_dir}/{style}/{img}"
38
+ self.target_images.append(img_path)
39
+ images_related_style.append(img_path)
40
+ self.style_to_images[style] = images_related_style
41
+
42
+ def __getitem__(self, index):
43
+ target_image_path = self.target_images[index]
44
+ target_image_name = target_image_path.split('/')[-1]
45
+ style, content = target_image_name.split('.')[0].split('+')
46
+
47
+ # Read content image
48
+ content_image_path = f"{self.root}/{self.phase}/ContentImage/{content}.jpg"
49
+ content_image = Image.open(content_image_path).convert('RGB')
50
+
51
+ # Random sample used for style image
52
+ images_related_style = self.style_to_images[style].copy()
53
+ images_related_style.remove(target_image_path)
54
+ style_image_path = random.choice(images_related_style)
55
+ style_image = Image.open(style_image_path).convert("RGB")
56
+
57
+ # Read target image
58
+ target_image = Image.open(target_image_path).convert("RGB")
59
+ nonorm_target_image = self.nonorm_transforms(target_image)
60
+
61
+ if self.transforms is not None:
62
+ content_image = self.transforms[0](content_image)
63
+ style_image = self.transforms[1](style_image)
64
+ target_image = self.transforms[2](target_image)
65
+
66
+ return content_image, style_image, target_image, nonorm_target_image, target_image_path
67
+
68
+ def __len__(self):
69
+ return len(self.target_images)
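`FontDataset` infers style/content pairs from the file layout: target images live under `{data_root}/{phase}/TargetImage/{style}/{style}+{content}.jpg`, content glyphs under `{data_root}/{phase}/ContentImage/{content}.jpg`, and the style image is sampled at random from the other images in the same style folder. A minimal loading sketch, assuming that layout with `phase='train'` (the data path and the shared normalization transform are illustrative):

```
from types import SimpleNamespace

import torchvision.transforms as transforms
from torch.utils.data import DataLoader

from dataset.font_dataset import FontDataset

args = SimpleNamespace(data_root="path/to/font_data", resolution=96)  # assumed location
norm = transforms.Compose([
    transforms.Resize((96, 96), interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])])
# FontDataset expects three transforms, applied to content, style and target images.
dataset = FontDataset(args=args, phase="train", transforms=[norm, norm, norm])
loader = DataLoader(dataset, batch_size=4, shuffle=True)
content, style, target, nonorm_target, target_path = next(iter(loader))
```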
figures/ref_imgs/ref_/345/243/244.jpg ADDED
figures/ref_imgs/ref_/345/252/232.jpg ADDED
figures/ref_imgs/ref_/346/252/200.jpg ADDED
figures/ref_imgs/ref_/346/254/237.jpg ADDED
figures/ref_imgs/ref_/347/251/227.jpg ADDED
figures/ref_imgs/ref_/347/261/215.jpg ADDED
figures/ref_imgs/ref_/347/261/215_1.jpg ADDED
figures/ref_imgs/ref_/350/234/223.jpg ADDED
figures/ref_imgs/ref_/350/261/204.jpg ADDED
figures/ref_imgs/ref_/351/227/241.jpg ADDED
figures/ref_imgs/ref_/351/233/225.jpg ADDED
figures/ref_imgs/ref_/351/236/243.jpg ADDED
figures/ref_imgs/ref_/351/246/250.jpg ADDED
figures/ref_imgs/ref_/351/262/270.jpg ADDED
figures/ref_imgs/ref_/351/267/242.jpg ADDED
figures/ref_imgs/ref_/351/271/260.jpg ADDED
figures/source_imgs/source_/347/201/250.jpg ADDED
figures/source_imgs/source_/351/207/205.jpg ADDED
figures/source_imgs/source_/351/221/253.jpg ADDED
figures/source_imgs/source_/351/221/273.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
1
+ transformers==4.33.1
2
+ accelerate==0.23.0
3
+ diffusers==0.22.0.dev0
4
+ gradio==4.8.0
5
+ pyyaml
sample.py ADDED
@@ -0,0 +1,252 @@
1
+ import os
2
+ import cv2
3
+ import time
4
+ import random
5
+ import numpy as np
6
+ from PIL import Image
7
+
8
+ import torch
9
+ import torchvision.transforms as transforms
10
+ from accelerate.utils import set_seed
11
+
12
+ from src import (FontDiffuserDPMPipeline,
13
+ FontDiffuserModelDPM,
14
+ build_ddpm_scheduler,
15
+ build_unet,
16
+ build_content_encoder,
17
+ build_style_encoder)
18
+ from utils import (ttf2im,
19
+ load_ttf,
20
+ is_char_in_font,
21
+ save_args_to_yaml,
22
+ save_single_image,
23
+ save_image_with_content_style)
24
+
25
+
26
+ def arg_parse():
27
+ from configs.fontdiffuser import get_parser
28
+
29
+ parser = get_parser()
30
+ parser.add_argument("--ckpt_dir", type=str, default=None)
31
+ parser.add_argument("--demo", action="store_true")
32
+ parser.add_argument("--controlnet", type=bool, default=False,
33
+ help="If in demo mode, the controlnet can be added.")
34
+ parser.add_argument("--character_input", action="store_true")
35
+ parser.add_argument("--content_character", type=str, default=None)
36
+ parser.add_argument("--content_image_path", type=str, default=None)
37
+ parser.add_argument("--style_image_path", type=str, default=None)
38
+ parser.add_argument("--save_image", action="store_true")
39
+ parser.add_argument("--save_image_dir", type=str, default=None,
40
+ help="The saving directory.")
41
+ parser.add_argument("--device", type=str, default="cuda:0")
42
+ parser.add_argument("--ttf_path", type=str, default="ttf/KaiXinSongA.ttf")
43
+ args = parser.parse_args()
44
+ style_image_size = args.style_image_size
45
+ content_image_size = args.content_image_size
46
+ args.style_image_size = (style_image_size, style_image_size)
47
+ args.content_image_size = (content_image_size, content_image_size)
48
+
49
+ return args
50
+
51
+
52
+ def image_process(args, content_image=None, style_image=None):
53
+ if not args.demo:
54
+ # Read content image and style image
55
+ if args.character_input:
56
+ assert args.content_character is not None, "The content_character should not be None."
57
+ if not is_char_in_font(font_path=args.ttf_path, char=args.content_character):
58
+ return None, None, None
59
+ font = load_ttf(ttf_path=args.ttf_path)
60
+ content_image = ttf2im(font=font, char=args.content_character)
61
+ content_image_pil = content_image.copy()
62
+ else:
63
+ content_image = Image.open(args.content_image_path).convert('RGB')
64
+ content_image_pil = None
65
+ style_image = Image.open(args.style_image_path).convert('RGB')
66
+ else:
67
+ assert style_image is not None, "The style image should not be None."
68
+ if args.character_input:
69
+ assert args.content_character is not None, "The content_character should not be None."
70
+ if not is_char_in_font(font_path=args.ttf_path, char=args.content_character):
71
+ return None, None, None
72
+ font = load_ttf(ttf_path=args.ttf_path)
73
+ content_image = ttf2im(font=font, char=args.content_character)
74
+ else:
75
+ assert content_image is not None, "The content image should not be None."
76
+ content_image_pil = None
77
+
78
+ ## Dataset transform
79
+ content_inference_transforms = transforms.Compose(
80
+ [transforms.Resize(args.content_image_size, \
81
+ interpolation=transforms.InterpolationMode.BILINEAR),
82
+ transforms.ToTensor(),
83
+ transforms.Normalize([0.5], [0.5])])
84
+ style_inference_transforms = transforms.Compose(
85
+ [transforms.Resize(args.style_image_size, \
86
+ interpolation=transforms.InterpolationMode.BILINEAR),
87
+ transforms.ToTensor(),
88
+ transforms.Normalize([0.5], [0.5])])
89
+ content_image = content_inference_transforms(content_image)[None, :]
90
+ style_image = style_inference_transforms(style_image)[None, :]
91
+
92
+ return content_image, style_image, content_image_pil
93
+
94
+ def load_fontdiffuer_pipeline(args):
95
+ # Load the model state_dict
96
+ unet = build_unet(args=args)
97
+ unet.load_state_dict(torch.load(f"{args.ckpt_dir}/unet.pth"))
98
+ style_encoder = build_style_encoder(args=args)
99
+ style_encoder.load_state_dict(torch.load(f"{args.ckpt_dir}/style_encoder.pth"))
100
+ content_encoder = build_content_encoder(args=args)
101
+ content_encoder.load_state_dict(torch.load(f"{args.ckpt_dir}/content_encoder.pth"))
102
+ model = FontDiffuserModelDPM(
103
+ unet=unet,
104
+ style_encoder=style_encoder,
105
+ content_encoder=content_encoder)
106
+ model.to(args.device)
107
+ print("Loaded the model state_dict successfully!")
108
+
109
+ # Load the training ddpm_scheduler.
110
+ train_scheduler = build_ddpm_scheduler(args=args)
111
+ print("Loaded training DDPM scheduler successfully!")
112
+
113
+ # Load the DPM_Solver to generate the sample.
114
+ pipe = FontDiffuserDPMPipeline(
115
+ model=model,
116
+ ddpm_train_scheduler=train_scheduler,
117
+ model_type=args.model_type,
118
+ guidance_type=args.guidance_type,
119
+ guidance_scale=args.guidance_scale,
120
+ )
121
+ print("Loaded dpm_solver pipeline successfully!")
122
+
123
+ return pipe
124
+
125
+
126
+ def sampling(args, pipe, content_image=None, style_image=None):
127
+ if not args.demo:
128
+ os.makedirs(args.save_image_dir, exist_ok=True)
129
+ # saving sampling config
130
+ save_args_to_yaml(args=args, output_file=f"{args.save_image_dir}/sampling_config.yaml")
131
+
132
+ if args.seed:
133
+ set_seed(seed=args.seed)
134
+
135
+ content_image, style_image, content_image_pil = image_process(args=args,
136
+ content_image=content_image,
137
+ style_image=style_image)
138
+ if content_image is None:
139
+ print("The content_character you provided is not in the ttf file. \
140
+ Please choose another character or change the ttf file.")
141
+ return None
142
+
143
+ with torch.no_grad():
144
+ content_image = content_image.to(args.device)
145
+ style_image = style_image.to(args.device)
146
+ print(f"Sampling by DPM-Solver++ ......")
147
+ start = time.time()
148
+ images = pipe.generate(
149
+ content_images=content_image,
150
+ style_images=style_image,
151
+ batch_size=1,
152
+ order=args.order,
153
+ num_inference_step=args.num_inference_steps,
154
+ content_encoder_downsample_size=args.content_encoder_downsample_size,
155
+ t_start=args.t_start,
156
+ t_end=args.t_end,
157
+ dm_size=args.content_image_size,
158
+ algorithm_type=args.algorithm_type,
159
+ skip_type=args.skip_type,
160
+ method=args.method,
161
+ correcting_x0_fn=args.correcting_x0_fn)
162
+ end = time.time()
163
+
164
+ if args.save_image:
165
+ print(f"Saving the image ......")
166
+ save_single_image(save_dir=args.save_image_dir, image=images[0])
167
+ if args.character_input:
168
+ save_image_with_content_style(save_dir=args.save_image_dir,
169
+ image=images[0],
170
+ content_image_pil=content_image_pil,
171
+ content_image_path=None,
172
+ style_image_path=args.style_image_path,
173
+ resolution=args.resolution)
174
+ else:
175
+ save_image_with_content_style(save_dir=args.save_image_dir,
176
+ image=images[0],
177
+ content_image_pil=None,
178
+ content_image_path=args.content_image_path,
179
+ style_image_path=args.style_image_path,
180
+ resolution=args.resolution)
181
+ print(f"Finished the sampling process in {end - start:.2f}s.")
182
+ return images[0]
183
+
184
+
185
+ def load_controlnet_pipeline(args,
186
+ config_path="lllyasviel/sd-controlnet-canny",
187
+ ckpt_path="runwayml/stable-diffusion-v1-5"):
188
+ from diffusers import ControlNetModel, AutoencoderKL
189
+ # load controlnet model and pipeline
190
+ from diffusers import StableDiffusionControlNetPipeline, UniPCMultistepScheduler
191
+ controlnet = ControlNetModel.from_pretrained(config_path,
192
+ torch_dtype=torch.float16,
193
+ cache_dir=f"{args.ckpt_dir}/controlnet")
194
+ print(f"Loaded ControlNet Model Successfully!")
195
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(ckpt_path,
196
+ controlnet=controlnet,
197
+ torch_dtype=torch.float16,
198
+ cache_dir=f"{args.ckpt_dir}/controlnet_pipeline")
199
+ # faster
200
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
201
+ pipe.enable_model_cpu_offload()
202
+ print(f"Loaded ControlNet Pipeline Successfully!")
203
+
204
+ return pipe
205
+
206
+
207
+ def controlnet(text_prompt,
208
+ pil_image,
209
+ pipe):
210
+ image = np.array(pil_image)
211
+ # get canny image
212
+ image = cv2.Canny(image=image, threshold1=100, threshold2=200)
213
+ image = image[:, :, None]
214
+ image = np.concatenate([image, image, image], axis=2)
215
+ canny_image = Image.fromarray(image)
216
+
217
+ seed = random.randint(0, 10000)
218
+ generator = torch.manual_seed(seed)
219
+ image = pipe(text_prompt,
220
+ num_inference_steps=50,
221
+ generator=generator,
222
+ image=canny_image,
223
+ output_type='pil').images[0]
224
+ return image
225
+
226
+
227
+ def load_instructpix2pix_pipeline(args,
228
+ ckpt_path="timbrooks/instruct-pix2pix"):
229
+ from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
230
+ pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(ckpt_path,
231
+ torch_dtype=torch.float16)
232
+ pipe.to(args.device)
233
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
234
+
235
+ return pipe
236
+
237
+ def instructpix2pix(pil_image, text_prompt, pipe):
238
+ image = pil_image.resize((512, 512))
239
+ seed = random.randint(0, 10000)
240
+ generator = torch.manual_seed(seed)
241
+ image = pipe(prompt=text_prompt, image=image, generator=generator,
242
+ num_inference_steps=20, image_guidance_scale=1.1).images[0]
243
+
244
+ return image
245
+
246
+
247
+ if __name__=="__main__":
248
+ args = arg_parse()
249
+
250
+ # load fontdiffuser pipeline
251
+ pipe = load_fontdiffuer_pipeline(args=args)
252
+ out_image = sampling(args=args, pipe=pipe)
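Outside the Gradio demo, `sample.py` is driven entirely by the flags defined in `arg_parse()`/`get_parser()`. A minimal sketch of a single run, assuming a CUDA device and using the checkpoints and example images added in this commit (the `outputs` directory is an assumed destination; equivalently, the same flags can be passed to `python sample.py` on the command line):

```
import sys

from sample import arg_parse, load_fontdiffuer_pipeline, sampling

# Emulate the command line expected by arg_parse().
sys.argv = ["sample.py",
            "--ckpt_dir", "ckpt",
            "--content_image_path", "figures/source_imgs/source_鑫.jpg",
            "--style_image_path", "figures/ref_imgs/ref_籍.jpg",
            "--save_image",
            "--save_image_dir", "outputs",   # assumed output directory
            "--device", "cuda:0"]

args = arg_parse()
pipe = load_fontdiffuer_pipeline(args=args)
image = sampling(args=args, pipe=pipe)  # returns the generated image and saves it under outputs/
```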
src/.DS_Store ADDED
Binary file (6.15 kB).
 
src/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ from .model import (FontDiffuserModel,
2
+ FontDiffuserModelDPM)
3
+ from .criterion import ContentPerceptualLoss
4
+ from .dpm_solver.pipeline_dpm_solver import FontDiffuserDPMPipeline
5
+ from .modules import (ContentEncoder,
6
+ StyleEncoder,
7
+ UNet)
8
+ from .build import (build_unet,
9
+ build_ddpm_scheduler,
10
+ build_style_encoder,
11
+ build_content_encoder)
src/build.py ADDED
@@ -0,0 +1,64 @@
1
+ from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
2
+ from src import (ContentEncoder,
3
+ StyleEncoder,
4
+ UNet)
5
+
6
+
7
+ def build_unet(args):
8
+ unet = UNet(
9
+ sample_size=args.resolution,
10
+ in_channels=3,
11
+ out_channels=3,
12
+ flip_sin_to_cos=True,
13
+ freq_shift=0,
14
+ down_block_types=('DownBlock2D',
15
+ 'MCADownBlock2D',
16
+ 'MCADownBlock2D',
17
+ 'DownBlock2D'),
18
+ up_block_types=('UpBlock2D',
19
+ 'StyleRSIUpBlock2D',
20
+ 'StyleRSIUpBlock2D',
21
+ 'UpBlock2D'),
22
+ block_out_channels=args.unet_channels,
23
+ layers_per_block=2,
24
+ downsample_padding=1,
25
+ mid_block_scale_factor=1,
26
+ act_fn='silu',
27
+ norm_num_groups=32,
28
+ norm_eps=1e-05,
29
+ cross_attention_dim=args.style_start_channel * 16,
30
+ attention_head_dim=1,
31
+ channel_attn=args.channel_attn,
32
+ content_encoder_downsample_size=args.content_encoder_downsample_size,
33
+ content_start_channel=args.content_start_channel,
34
+ reduction=32)
35
+
36
+ return unet
37
+
38
+
39
+ def build_style_encoder(args):
40
+ style_image_encoder = StyleEncoder(
41
+ G_ch=args.style_start_channel,
42
+ resolution=args.style_image_size[0])
43
+ print("Get CG-GAN Style Encoder!")
44
+ return style_image_encoder
45
+
46
+
47
+ def build_content_encoder(args):
48
+ content_image_encoder = ContentEncoder(
49
+ G_ch=args.content_start_channel,
50
+ resolution=args.content_image_size[0])
51
+ print("Get CG-GAN Content Encoder!")
52
+ return content_image_encoder
53
+
54
+
55
+ def build_ddpm_scheduler(args):
56
+ ddpm_scheduler = DDPMScheduler(
57
+ num_train_timesteps=1000,
58
+ beta_start=0.0001,
59
+ beta_end=0.02,
60
+ beta_schedule=args.beta_scheduler,
61
+ trained_betas=None,
62
+ variance_type="fixed_small",
63
+ clip_sample=True)
64
+ return ddpm_scheduler
src/criterion.py ADDED
@@ -0,0 +1,44 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torchvision
4
+
5
+
6
+ class VGG16(nn.Module):
7
+ def __init__(self):
8
+ super(VGG16, self).__init__()
9
+ vgg16 = torchvision.models.vgg16(pretrained=True)
10
+
11
+ self.enc_1 = nn.Sequential(*vgg16.features[:5])
12
+ self.enc_2 = nn.Sequential(*vgg16.features[5:10])
13
+ self.enc_3 = nn.Sequential(*vgg16.features[10:17])
14
+
15
+ for i in range(3):
16
+ for param in getattr(self, f'enc_{i+1:d}').parameters():
17
+ param.requires_grad = False
18
+
19
+ def forward(self, image):
20
+ results = [image]
21
+ for i in range(3):
22
+ func = getattr(self, f'enc_{i+1:d}')
23
+ results.append(func(results[-1]))
24
+ return results[1:]
25
+
26
+
27
+ class ContentPerceptualLoss(nn.Module):
28
+
29
+ def __init__(self):
30
+ super().__init__()
31
+ self.VGG = VGG16()
32
+
33
+ def calculate_loss(self, generated_images, target_images, device):
34
+ self.VGG = self.VGG.to(device)
35
+
36
+ generated_features = self.VGG(generated_images)
37
+ target_features = self.VGG(target_images)
38
+
39
+ perceptual_loss = 0
40
+ perceptual_loss += torch.mean((target_features[0] - generated_features[0]) ** 2)
41
+ perceptual_loss += torch.mean((target_features[1] - generated_features[1]) ** 2)
42
+ perceptual_loss += torch.mean((target_features[2] - generated_features[2]) ** 2)
43
+ perceptual_loss /= 3
44
+ return perceptual_loss
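`ContentPerceptualLoss` averages the MSE between frozen VGG16 features of the generated and target images at three shallow depths; during training it is presumably weighted by `--perceptual_coefficient` from the config above. A minimal sketch with random tensors standing in for image batches (note that `torchvision.models.vgg16(pretrained=True)` downloads ImageNet weights on first use):

```
import torch

from src.criterion import ContentPerceptualLoss

device = "cuda:0" if torch.cuda.is_available() else "cpu"
criterion = ContentPerceptualLoss()

# Random stand-ins for generated/target batches of 3-channel 96x96 images.
generated = torch.rand(4, 3, 96, 96, device=device, requires_grad=True)
target = torch.rand(4, 3, 96, 96, device=device)

loss = criterion.calculate_loss(generated, target, device=device)
loss.backward()  # gradients reach `generated`; the frozen VGG16 weights are untouched
```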
src/dpm_solver/dpm_solver_pytorch.py ADDED
@@ -0,0 +1,1332 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import math
4
+
5
+
6
+ class NoiseScheduleVP:
7
+ def __init__(
8
+ self,
9
+ schedule='discrete',
10
+ betas=None,
11
+ alphas_cumprod=None,
12
+ continuous_beta_0=0.1,
13
+ continuous_beta_1=20.,
14
+ dtype=torch.float32,
15
+ ):
16
+ """Create a wrapper class for the forward SDE (VP type).
17
+
18
+ ***
19
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
20
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
21
+ ***
22
+
23
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
24
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
25
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
26
+
27
+ log_alpha_t = self.marginal_log_mean_coeff(t)
28
+ sigma_t = self.marginal_std(t)
29
+ lambda_t = self.marginal_lambda(t)
30
+
31
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
32
+
33
+ t = self.inverse_lambda(lambda_t)
34
+
35
+ ===============================================================
36
+
37
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
38
+
39
+ 1. For discrete-time DPMs:
40
+
41
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
42
+ t_i = (i + 1) / N
43
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
44
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
45
+
46
+ Args:
47
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
48
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
49
+
50
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
51
+
52
+ **Important**: Please pay special attention for the args for `alphas_cumprod`:
53
+ The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
54
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
55
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
56
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
57
+ and
58
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
59
+
60
+
61
+ 2. For continuous-time DPMs:
62
+
63
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
64
+ schedule are the default settings in DDPM and improved-DDPM:
65
+
66
+ Args:
67
+ beta_min: A `float` number. The smallest beta for the linear schedule.
68
+ beta_max: A `float` number. The largest beta for the linear schedule.
69
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
70
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
71
+ T: A `float` number. The ending time of the forward process.
72
+
73
+ ===============================================================
74
+
75
+ Args:
76
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
77
+ 'linear' or 'cosine' for continuous-time DPMs.
78
+ Returns:
79
+ A wrapper object of the forward SDE (VP type).
80
+
81
+ ===============================================================
82
+
83
+ Example:
84
+
85
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
86
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
87
+
88
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
89
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
90
+
91
+ # For continuous-time DPMs (VPSDE), linear schedule:
92
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
93
+
94
+ """
95
+
96
+ if schedule not in ['discrete', 'linear', 'cosine']:
97
+ raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
98
+
99
+ self.schedule = schedule
100
+ if schedule == 'discrete':
101
+ if betas is not None:
102
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
103
+ else:
104
+ assert alphas_cumprod is not None
105
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
106
+ self.total_N = len(log_alphas)
107
+ self.T = 1.
108
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)
109
+ self.log_alpha_array = log_alphas.reshape((1, -1,)).to(dtype=dtype)
110
+ else:
111
+ self.total_N = 1000
112
+ self.beta_0 = continuous_beta_0
113
+ self.beta_1 = continuous_beta_1
114
+ self.cosine_s = 0.008
115
+ self.cosine_beta_max = 999.
116
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
117
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
118
+ self.schedule = schedule
119
+ if schedule == 'cosine':
120
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
121
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
122
+ self.T = 0.9946
123
+ else:
124
+ self.T = 1.
125
+
126
+ def marginal_log_mean_coeff(self, t):
127
+ """
128
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
129
+ """
130
+ if self.schedule == 'discrete':
131
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
132
+ elif self.schedule == 'linear':
133
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
134
+ elif self.schedule == 'cosine':
135
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
136
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
137
+ return log_alpha_t
138
+
139
+ def marginal_alpha(self, t):
140
+ """
141
+ Compute alpha_t of a given continuous-time label t in [0, T].
142
+ """
143
+ return torch.exp(self.marginal_log_mean_coeff(t))
144
+
145
+ def marginal_std(self, t):
146
+ """
147
+ Compute sigma_t of a given continuous-time label t in [0, T].
148
+ """
149
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
150
+
151
+ def marginal_lambda(self, t):
152
+ """
153
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
154
+ """
155
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
156
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
157
+ return log_mean_coeff - log_std
158
+
159
+ def inverse_lambda(self, lamb):
160
+ """
161
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
162
+ """
163
+ if self.schedule == 'linear':
164
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
165
+ Delta = self.beta_0**2 + tmp
166
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
167
+ elif self.schedule == 'discrete':
168
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
169
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
170
+ return t.reshape((-1,))
171
+ else:
172
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
173
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
174
+ t = t_fn(log_alpha)
175
+ return t
176
+
177
+
178
+ def model_wrapper(
179
+ model,
180
+ noise_schedule,
181
+ model_type="noise",
182
+ model_kwargs={},
183
+ guidance_type="uncond",
184
+ condition=None,
185
+ unconditional_condition=None,
186
+ guidance_scale=1.,
187
+ classifier_fn=None,
188
+ classifier_kwargs={},
189
+ ):
190
+ """Create a wrapper function for the noise prediction model.
191
+
192
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
193
+ firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
194
+
195
+ We support four types of the diffusion model by setting `model_type`:
196
+
197
+ 1. "noise": noise prediction model. (Trained by predicting noise).
198
+
199
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
200
+
201
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
202
+ The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
203
+
204
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
205
+ arXiv preprint arXiv:2202.00512 (2022).
206
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
207
+ arXiv preprint arXiv:2210.02303 (2022).
208
+
209
+ 4. "score": marginal score function. (Trained by denoising score matching).
210
+ Note that the score function and the noise prediction model follow a simple relationship:
211
+ ```
212
+ noise(x_t, t) = -sigma_t * score(x_t, t)
213
+ ```
214
+
215
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
216
+ 1. "uncond": unconditional sampling by DPMs.
217
+ The input `model` has the following format:
218
+ ``
219
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
220
+ ``
221
+
222
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
223
+ The input `model` has the following format:
224
+ ``
225
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
226
+ ``
227
+
228
+ The input `classifier_fn` has the following format:
229
+ ``
230
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
231
+ ``
232
+
233
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
234
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
235
+
236
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
237
+ The input `model` has the following format:
238
+ ``
239
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
240
+ ``
241
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
242
+
243
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
244
+ arXiv preprint arXiv:2207.12598 (2022).
245
+
246
+
247
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
248
+ or continuous-time labels (i.e. epsilon to T).
249
+
250
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
251
+ ``
252
+ def model_fn(x, t_continuous) -> noise:
253
+ t_input = get_model_input_time(t_continuous)
254
+ return noise_pred(model, x, t_input, **model_kwargs)
255
+ ``
256
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
257
+
258
+ ===============================================================
259
+
260
+ Args:
261
+ model: A diffusion model with the corresponding format described above.
262
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
263
+ model_type: A `str`. The parameterization type of the diffusion model.
264
+ "noise" or "x_start" or "v" or "score".
265
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
266
+ guidance_type: A `str`. The type of the guidance for sampling.
267
+ "uncond" or "classifier" or "classifier-free".
268
+ condition: A pytorch tensor. The condition for the guided sampling.
269
+ Only used for "classifier" or "classifier-free" guidance type.
270
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
271
+ Only used for "classifier-free" guidance type.
272
+ guidance_scale: A `float`. The scale for the guided sampling.
273
+ classifier_fn: A classifier function. Only used for the classifier guidance.
274
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
275
+ Returns:
276
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
277
+ """
278
+
279
+ def get_model_input_time(t_continuous):
280
+ """
281
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
282
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
283
+ For continuous-time DPMs, we just use `t_continuous`.
284
+ """
285
+ if noise_schedule.schedule == 'discrete':
286
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
287
+ else:
288
+ return t_continuous
289
+
290
+ def noise_pred_fn(x, t_continuous, cond=None):
291
+ t_input = get_model_input_time(t_continuous)
292
+ if cond is None:
293
+ output = model(x, t_input, **model_kwargs)
294
+ else:
295
+ output = model(x, t_input, cond, **model_kwargs)
296
+ if model_type == "noise":
297
+ return output
298
+ elif model_type == "x_start":
299
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
300
+ return (x - alpha_t * output) / sigma_t
301
+ elif model_type == "v":
302
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
303
+ return alpha_t * output + sigma_t * x
304
+ elif model_type == "score":
305
+ sigma_t = noise_schedule.marginal_std(t_continuous)
306
+ return -sigma_t * output
307
+
308
+ def cond_grad_fn(x, t_input):
309
+ """
310
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
311
+ """
312
+ with torch.enable_grad():
313
+ x_in = x.detach().requires_grad_(True)
314
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
315
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
316
+
317
+ def model_fn(x, t_continuous):
318
+ """
319
+ The noise prediction model function that is used for DPM-Solver.
320
+ """
321
+ if guidance_type == "uncond":
322
+ return noise_pred_fn(x, t_continuous)
323
+ elif guidance_type == "classifier":
324
+ assert classifier_fn is not None
325
+ t_input = get_model_input_time(t_continuous)
326
+ cond_grad = cond_grad_fn(x, t_input)
327
+ sigma_t = noise_schedule.marginal_std(t_continuous)
328
+ noise = noise_pred_fn(x, t_continuous)
329
+ return noise - guidance_scale * sigma_t * cond_grad
330
+ elif guidance_type == "classifier-free":
331
+ if guidance_scale == 1. or unconditional_condition is None:
332
+ return noise_pred_fn(x, t_continuous, cond=condition)
333
+ elif model_kwargs["version"] == "V1" or model_kwargs["version"] == "V2_ConStyle" or model_kwargs["version"] == "V3": # add this
334
+ x_in = torch.cat([x] * 2)
335
+ t_in = torch.cat([t_continuous] * 2)
336
+ c_in = []
337
+ c_in.append(torch.cat([unconditional_condition[0], condition[0]], dim=0))
338
+ c_in.append(torch.cat([unconditional_condition[1], condition[1]], dim=0))
339
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
340
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
341
+ elif model_kwargs["version"] == "FG_Sep":
342
+ x_in = torch.cat([x] * 3)
343
+ t_in = torch.cat([t_continuous] * 3)
344
+ c_in = []
345
+ c_in.append(torch.cat([unconditional_condition[0], unconditional_condition[0], condition[0]], dim=0))
346
+ c_in.append(torch.cat([unconditional_condition[1], condition[1], unconditional_condition[1]], dim=0))
347
+ noise_uncond, noise_cond_style, noise_cond_content = noise_pred_fn(x_in, t_in, cond=c_in).chunk(3)
348
+
349
+ style_guidance_scale = guidance_scale[0]
350
+ content_guidance_scale = guidance_scale[1]
351
+ return noise_uncond + style_guidance_scale * (noise_cond_style - noise_uncond) + content_guidance_scale * (noise_cond_content - noise_uncond)
352
+ else:
353
+ x_in = torch.cat([x] * 2)
354
+ t_in = torch.cat([t_continuous] * 2)
355
+ c_in = torch.cat([unconditional_condition, condition])
356
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
357
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
358
+
359
+ assert model_type in ["noise", "x_start", "v"]
360
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
361
+ return model_fn
362
+
363
+
364
+ class DPM_Solver:
365
+ def __init__(
366
+ self,
367
+ model_fn,
368
+ noise_schedule,
369
+ algorithm_type="dpmsolver++",
370
+ correcting_x0_fn=None,
371
+ correcting_xt_fn=None,
372
+ thresholding_max_val=1.,
373
+ dynamic_thresholding_ratio=0.995,
374
+ ):
375
+ """Construct a DPM-Solver.
376
+
377
+ We support both DPM-Solver (`algorithm_type="dpmsolver"`) and DPM-Solver++ (`algorithm_type="dpmsolver++"`).
378
+
379
+ We also support the "dynamic thresholding" method in Imagen[1]. For pixel-space diffusion models, you
380
+ can set both `algorithm_type="dpmsolver++"` and `correcting_x0_fn="dynamic_thresholding"` to use the
381
+ dynamic thresholding. The "dynamic thresholding" can greatly improve the sample quality for pixel-space
382
+ DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space
383
+ DPMs (such as stable-diffusion).
384
+
385
+ To support advanced algorithms in image-to-image applications, we also support corrector functions for
386
+ both x0 and xt.
387
+
388
+ Args:
389
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
390
+ ``
391
+ def model_fn(x, t_continuous):
392
+ return noise
393
+ ``
394
+ The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.
395
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
396
+ algorithm_type: A `str`. Either "dpmsolver" or "dpmsolver++".
397
+ correcting_x0_fn: A `str` or a function with the following format:
398
+ ```
399
+ def correcting_x0_fn(x0, t):
400
+ x0_new = ...
401
+ return x0_new
402
+ ```
403
+ This function is to correct the outputs of the data prediction model at each sampling step. e.g.,
404
+ ```
405
+ x0_pred = data_pred_model(xt, t)
406
+ if correcting_x0_fn is not None:
407
+ x0_pred = correcting_x0_fn(x0_pred, t)
408
+ xt_1 = update(x0_pred, xt, t)
409
+ ```
410
+ If `correcting_x0_fn="dynamic_thresholding"`, we use the dynamic thresholding proposed in Imagen[1].
411
+ correcting_xt_fn: A function with the following format:
412
+ ```
413
+ def correcting_xt_fn(xt, t, step):
414
+ x_new = ...
415
+ return x_new
416
+ ```
417
+ This function is to correct the intermediate samples xt at each sampling step. e.g.,
418
+ ```
419
+ xt = ...
420
+ xt = correcting_xt_fn(xt, t, step)
421
+ ```
422
+ thresholding_max_val: A `float`. The max value for thresholding.
423
+ Valid only when using `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.
424
+ dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).
425
+ Valid only when using `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`.
426
+
427
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,
428
+ Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models
429
+ with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
430
+ """
431
+ self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))
432
+ self.noise_schedule = noise_schedule
433
+ assert algorithm_type in ["dpmsolver", "dpmsolver++"]
434
+ self.algorithm_type = algorithm_type
435
+ if correcting_x0_fn == "dynamic_thresholding":
436
+ self.correcting_x0_fn = self.dynamic_thresholding_fn
437
+ else:
438
+ self.correcting_x0_fn = correcting_x0_fn
439
+ self.correcting_xt_fn = correcting_xt_fn
440
+ self.dynamic_thresholding_ratio = dynamic_thresholding_ratio
441
+ self.thresholding_max_val = thresholding_max_val
442
+
443
+ def dynamic_thresholding_fn(self, x0):
444
+ """
445
+ The dynamic thresholding method.
446
+ """
447
+ dims = x0.dim()
448
+ p = self.dynamic_thresholding_ratio
449
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
450
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
451
+ x0 = torch.clamp(x0, -s, s) / s
452
+ return x0
453
+
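For reference, the dynamic thresholding above can be exercised in isolation; a minimal sketch under the same assumptions (pixel-space values, per-sample quantile, `thresholding_max_val=1.0`):

```python
import torch

def dynamic_threshold(x0, ratio=0.995, max_val=1.0):
    # Per-sample quantile of |x0|, floored at max_val, then clamp and rescale to [-1, 1].
    s = torch.quantile(x0.abs().reshape(x0.shape[0], -1), ratio, dim=1)
    s = torch.maximum(s, torch.full_like(s, max_val))
    s = s.view(-1, *([1] * (x0.dim() - 1)))   # broadcast over the remaining dims
    return x0.clamp(-s, s) / s

x0 = 3.0 * torch.randn(4, 3, 96, 96)          # an over-saturated x0 prediction
print(dynamic_threshold(x0).abs().max())      # <= 1.0
```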
454
+ def noise_prediction_fn(self, x, t):
455
+ """
456
+ Return the noise prediction model.
457
+ """
458
+ return self.model(x, t)
459
+
460
+ def data_prediction_fn(self, x, t):
461
+ """
462
+ Return the data prediction model (with corrector).
463
+ """
464
+ noise = self.noise_prediction_fn(x, t)
465
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
466
+ x0 = (x - sigma_t * noise) / alpha_t
467
+ if self.correcting_x0_fn is not None:
468
+ x0 = self.correcting_x0_fn(x0)
469
+ return x0
470
+
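`data_prediction_fn` is simply the forward-process identity read backwards, the same identity that `model_wrapper` uses for `model_type="x_start"`:

$$
x_t = \alpha_t x_0 + \sigma_t \epsilon
\quad\Longrightarrow\quad
\hat{x}_0 = \frac{x_t - \sigma_t\,\hat{\epsilon}_\theta(x_t, t)}{\alpha_t},
\qquad
\hat{\epsilon} = \frac{x_t - \alpha_t\,\hat{x}_0}{\sigma_t}.
$$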
471
+ def model_fn(self, x, t):
472
+ """
473
+ Convert the model to the noise prediction model or the data prediction model.
474
+ """
475
+ if self.algorithm_type == "dpmsolver++":
476
+ return self.data_prediction_fn(x, t)
477
+ else:
478
+ return self.noise_prediction_fn(x, t)
479
+
480
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
481
+ """Compute the intermediate time steps for sampling.
482
+
483
+ Args:
484
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
485
+ - 'logSNR': uniform logSNR for the time steps.
486
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
487
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
488
+ t_T: A `float`. The starting time of the sampling (default is T).
489
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
490
+ N: A `int`. The total number of the spacing of the time steps.
491
+ device: A torch device.
492
+ Returns:
493
+ A pytorch tensor of the time steps, with the shape (N + 1,).
494
+ """
495
+ if skip_type == 'logSNR':
496
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
497
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
498
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
499
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
500
+ elif skip_type == 'time_uniform':
501
+ return torch.linspace(t_T, t_0, N + 1).to(device)
502
+ elif skip_type == 'time_quadratic':
503
+ t_order = 2
504
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
505
+ return t
506
+ else:
507
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
508
+
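A quick way to compare the two time-based spacings above (a standalone sketch; the 'logSNR' spacing additionally needs the schedule's lambda(t) and its inverse, so it is omitted here):

```python
import torch

def time_steps(skip_type, t_T=1.0, t_0=1e-3, N=10):
    # Mirrors the 'time_uniform' and 'time_quadratic' branches above.
    if skip_type == "time_uniform":
        return torch.linspace(t_T, t_0, N + 1)
    if skip_type == "time_quadratic":
        return torch.linspace(t_T ** 0.5, t_0 ** 0.5, N + 1) ** 2
    raise ValueError(skip_type)

print(time_steps("time_uniform", N=5))    # evenly spaced from t_T down to t_0
print(time_steps("time_quadratic", N=5))  # steps cluster near t_0
```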
509
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
510
+ """
511
+ Get the order of each step for sampling by the singlestep DPM-Solver.
512
+
513
+ We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
514
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
515
+ - If order == 1:
516
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
517
+ - If order == 2:
518
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
519
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
520
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
521
+ - If order == 3:
522
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
523
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
524
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
525
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
526
+
527
+ ============================================
528
+ Args:
529
+ order: A `int`. The max order for the solver (2 or 3).
530
+ steps: A `int`. The total number of function evaluations (NFE).
531
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
532
+ - 'logSNR': uniform logSNR for the time steps.
533
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
534
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
535
+ t_T: A `float`. The starting time of the sampling (default is T).
536
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
537
+ device: A torch device.
538
+ Returns:
539
+ orders: A list of the solver order of each step.
540
+ """
541
+ if order == 3:
542
+ K = steps // 3 + 1
543
+ if steps % 3 == 0:
544
+ orders = [3,] * (K - 2) + [2, 1]
545
+ elif steps % 3 == 1:
546
+ orders = [3,] * (K - 1) + [1]
547
+ else:
548
+ orders = [3,] * (K - 1) + [2]
549
+ elif order == 2:
550
+ if steps % 2 == 0:
551
+ K = steps // 2
552
+ orders = [2,] * K
553
+ else:
554
+ K = steps // 2 + 1
555
+ orders = [2,] * (K - 1) + [1]
556
+ elif order == 1:
557
+ K = 1
558
+ orders = [1,] * steps
559
+ else:
560
+ raise ValueError("'order' must be '1' or '2' or '3'.")
561
+ if skip_type == 'logSNR':
562
+ # To reproduce the results in DPM-Solver paper
563
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
564
+ else:
565
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
566
+ return timesteps_outer, orders
567
+
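The splitting rule documented above can be checked with a small standalone helper (a sketch that mirrors the branches, not repository code):

```python
def split_orders(steps, order):
    # Order assignment used by DPM-Solver-fast for a fixed NFE budget `steps`.
    if order == 3:
        K = steps // 3 + 1
        return {0: [3] * (K - 2) + [2, 1],
                1: [3] * (K - 1) + [1],
                2: [3] * (K - 1) + [2]}[steps % 3]
    if order == 2:
        return [2] * (steps // 2) + ([] if steps % 2 == 0 else [1])
    return [1] * steps

print(split_orders(20, 3))  # [3, 3, 3, 3, 3, 3, 2] -> 6*3 + 2     = 20 NFE
print(split_orders(15, 3))  # [3, 3, 3, 3, 2, 1]    -> 4*3 + 2 + 1 = 15 NFE
```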
568
+ def denoise_to_zero_fn(self, x, s):
569
+ """
570
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
571
+ """
572
+ return self.data_prediction_fn(x, s)
573
+
574
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
575
+ """
576
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
577
+
578
+ Args:
579
+ x: A pytorch tensor. The initial value at time `s`.
580
+ s: A pytorch tensor. The starting time, with the shape (1,).
581
+ t: A pytorch tensor. The ending time, with the shape (1,).
582
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
583
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
584
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
585
+ Returns:
586
+ x_t: A pytorch tensor. The approximated solution at time `t`.
587
+ """
588
+ ns = self.noise_schedule
589
+ dims = x.dim()
590
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
591
+ h = lambda_t - lambda_s
592
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
593
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
594
+ alpha_t = torch.exp(log_alpha_t)
595
+
596
+ if self.algorithm_type == "dpmsolver++":
597
+ phi_1 = torch.expm1(-h)
598
+ if model_s is None:
599
+ model_s = self.model_fn(x, s)
600
+ x_t = (
601
+ sigma_t / sigma_s * x
602
+ - alpha_t * phi_1 * model_s
603
+ )
604
+ if return_intermediate:
605
+ return x_t, {'model_s': model_s}
606
+ else:
607
+ return x_t
608
+ else:
609
+ phi_1 = torch.expm1(h)
610
+ if model_s is None:
611
+ model_s = self.model_fn(x, s)
612
+ x_t = (
613
+ torch.exp(log_alpha_t - log_alpha_s) * x
614
+ - (sigma_t * phi_1) * model_s
615
+ )
616
+ if return_intermediate:
617
+ return x_t, {'model_s': model_s}
618
+ else:
619
+ return x_t
620
+
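For reference, the two branches above are the closed-form first-order updates, with $h = \lambda_t - \lambda_s$:

$$
x_t = \frac{\sigma_t}{\sigma_s}\,x_s \;-\; \alpha_t\,(e^{-h}-1)\,x_\theta(x_s, s)
\qquad\text{(dpmsolver++, data prediction)},
$$
$$
x_t = \frac{\alpha_t}{\alpha_s}\,x_s \;-\; \sigma_t\,(e^{h}-1)\,\epsilon_\theta(x_s, s)
\qquad\text{(dpmsolver, noise prediction)}.
$$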
621
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpmsolver'):
622
+ """
623
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
624
+
625
+ Args:
626
+ x: A pytorch tensor. The initial value at time `s`.
627
+ s: A pytorch tensor. The starting time, with the shape (1,).
628
+ t: A pytorch tensor. The ending time, with the shape (1,).
629
+ r1: A `float`. The hyperparameter of the second-order solver.
630
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
631
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
632
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
633
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
634
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
635
+ Returns:
636
+ x_t: A pytorch tensor. The approximated solution at time `t`.
637
+ """
638
+ if solver_type not in ['dpmsolver', 'taylor']:
639
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
640
+ if r1 is None:
641
+ r1 = 0.5
642
+ ns = self.noise_schedule
643
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
644
+ h = lambda_t - lambda_s
645
+ lambda_s1 = lambda_s + r1 * h
646
+ s1 = ns.inverse_lambda(lambda_s1)
647
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)
648
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
649
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
650
+
651
+ if self.algorithm_type == "dpmsolver++":
652
+ phi_11 = torch.expm1(-r1 * h)
653
+ phi_1 = torch.expm1(-h)
654
+
655
+ if model_s is None:
656
+ model_s = self.model_fn(x, s)
657
+ x_s1 = (
658
+ (sigma_s1 / sigma_s) * x
659
+ - (alpha_s1 * phi_11) * model_s
660
+ )
661
+ model_s1 = self.model_fn(x_s1, s1)
662
+ if solver_type == 'dpmsolver':
663
+ x_t = (
664
+ (sigma_t / sigma_s) * x
665
+ - (alpha_t * phi_1) * model_s
666
+ - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)
667
+ )
668
+ elif solver_type == 'taylor':
669
+ x_t = (
670
+ (sigma_t / sigma_s) * x
671
+ - (alpha_t * phi_1) * model_s
672
+ + (1. / r1) * (alpha_t * (phi_1 / h + 1.)) * (model_s1 - model_s)
673
+ )
674
+ else:
675
+ phi_11 = torch.expm1(r1 * h)
676
+ phi_1 = torch.expm1(h)
677
+
678
+ if model_s is None:
679
+ model_s = self.model_fn(x, s)
680
+ x_s1 = (
681
+ torch.exp(log_alpha_s1 - log_alpha_s) * x
682
+ - (sigma_s1 * phi_11) * model_s
683
+ )
684
+ model_s1 = self.model_fn(x_s1, s1)
685
+ if solver_type == 'dpmsolver':
686
+ x_t = (
687
+ torch.exp(log_alpha_t - log_alpha_s) * x
688
+ - (sigma_t * phi_1) * model_s
689
+ - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)
690
+ )
691
+ elif solver_type == 'taylor':
692
+ x_t = (
693
+ torch.exp(log_alpha_t - log_alpha_s) * x
694
+ - (sigma_t * phi_1) * model_s
695
+ - (1. / r1) * (sigma_t * (phi_1 / h - 1.)) * (model_s1 - model_s)
696
+ )
697
+ if return_intermediate:
698
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
699
+ else:
700
+ return x_t
701
+
702
+ def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpmsolver'):
703
+ """
704
+ Singlestep solver DPM-Solver-3 from time `s` to time `t`.
705
+
706
+ Args:
707
+ x: A pytorch tensor. The initial value at time `s`.
708
+ s: A pytorch tensor. The starting time, with the shape (1,).
709
+ t: A pytorch tensor. The ending time, with the shape (1,).
710
+ r1: A `float`. The hyperparameter of the third-order solver.
711
+ r2: A `float`. The hyperparameter of the third-order solver.
712
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
713
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
714
+ model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
715
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
716
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
717
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
718
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
719
+ Returns:
720
+ x_t: A pytorch tensor. The approximated solution at time `t`.
721
+ """
722
+ if solver_type not in ['dpmsolver', 'taylor']:
723
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
724
+ if r1 is None:
725
+ r1 = 1. / 3.
726
+ if r2 is None:
727
+ r2 = 2. / 3.
728
+ ns = self.noise_schedule
729
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
730
+ h = lambda_t - lambda_s
731
+ lambda_s1 = lambda_s + r1 * h
732
+ lambda_s2 = lambda_s + r2 * h
733
+ s1 = ns.inverse_lambda(lambda_s1)
734
+ s2 = ns.inverse_lambda(lambda_s2)
735
+ log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
736
+ sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)
737
+ alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
738
+
739
+ if self.algorithm_type == "dpmsolver++":
740
+ phi_11 = torch.expm1(-r1 * h)
741
+ phi_12 = torch.expm1(-r2 * h)
742
+ phi_1 = torch.expm1(-h)
743
+ phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
744
+ phi_2 = phi_1 / h + 1.
745
+ phi_3 = phi_2 / h - 0.5
746
+
747
+ if model_s is None:
748
+ model_s = self.model_fn(x, s)
749
+ if model_s1 is None:
750
+ x_s1 = (
751
+ (sigma_s1 / sigma_s) * x
752
+ - (alpha_s1 * phi_11) * model_s
753
+ )
754
+ model_s1 = self.model_fn(x_s1, s1)
755
+ x_s2 = (
756
+ (sigma_s2 / sigma_s) * x
757
+ - (alpha_s2 * phi_12) * model_s
758
+ + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)
759
+ )
760
+ model_s2 = self.model_fn(x_s2, s2)
761
+ if solver_type == 'dpmsolver':
762
+ x_t = (
763
+ (sigma_t / sigma_s) * x
764
+ - (alpha_t * phi_1) * model_s
765
+ + (1. / r2) * (alpha_t * phi_2) * (model_s2 - model_s)
766
+ )
767
+ elif solver_type == 'taylor':
768
+ D1_0 = (1. / r1) * (model_s1 - model_s)
769
+ D1_1 = (1. / r2) * (model_s2 - model_s)
770
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
771
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
772
+ x_t = (
773
+ (sigma_t / sigma_s) * x
774
+ - (alpha_t * phi_1) * model_s
775
+ + (alpha_t * phi_2) * D1
776
+ - (alpha_t * phi_3) * D2
777
+ )
778
+ else:
779
+ phi_11 = torch.expm1(r1 * h)
780
+ phi_12 = torch.expm1(r2 * h)
781
+ phi_1 = torch.expm1(h)
782
+ phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
783
+ phi_2 = phi_1 / h - 1.
784
+ phi_3 = phi_2 / h - 0.5
785
+
786
+ if model_s is None:
787
+ model_s = self.model_fn(x, s)
788
+ if model_s1 is None:
789
+ x_s1 = (
790
+ (torch.exp(log_alpha_s1 - log_alpha_s)) * x
791
+ - (sigma_s1 * phi_11) * model_s
792
+ )
793
+ model_s1 = self.model_fn(x_s1, s1)
794
+ x_s2 = (
795
+ (torch.exp(log_alpha_s2 - log_alpha_s)) * x
796
+ - (sigma_s2 * phi_12) * model_s
797
+ - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)
798
+ )
799
+ model_s2 = self.model_fn(x_s2, s2)
800
+ if solver_type == 'dpmsolver':
801
+ x_t = (
802
+ (torch.exp(log_alpha_t - log_alpha_s)) * x
803
+ - (sigma_t * phi_1) * model_s
804
+ - (1. / r2) * (sigma_t * phi_2) * (model_s2 - model_s)
805
+ )
806
+ elif solver_type == 'taylor':
807
+ D1_0 = (1. / r1) * (model_s1 - model_s)
808
+ D1_1 = (1. / r2) * (model_s2 - model_s)
809
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
810
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
811
+ x_t = (
812
+ (torch.exp(log_alpha_t - log_alpha_s)) * x
813
+ - (sigma_t * phi_1) * model_s
814
+ - (sigma_t * phi_2) * D1
815
+ - (sigma_t * phi_3) * D2
816
+ )
817
+
818
+ if return_intermediate:
819
+ return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
820
+ else:
821
+ return x_t
822
+
823
+ def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpmsolver"):
824
+ """
825
+ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
826
+
827
+ Args:
828
+ x: A pytorch tensor. The initial value at time `s`.
829
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
830
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
831
+ t: A pytorch tensor. The ending time, with the shape (1,).
832
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
833
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
834
+ Returns:
835
+ x_t: A pytorch tensor. The approximated solution at time `t`.
836
+ """
837
+ if solver_type not in ['dpmsolver', 'taylor']:
838
+ raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
839
+ ns = self.noise_schedule
840
+ model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]
841
+ t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]
842
+ lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
843
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
844
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
845
+ alpha_t = torch.exp(log_alpha_t)
846
+
847
+ h_0 = lambda_prev_0 - lambda_prev_1
848
+ h = lambda_t - lambda_prev_0
849
+ r0 = h_0 / h
850
+ D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)
851
+ if self.algorithm_type == "dpmsolver++":
852
+ phi_1 = torch.expm1(-h)
853
+ if solver_type == 'dpmsolver':
854
+ x_t = (
855
+ (sigma_t / sigma_prev_0) * x
856
+ - (alpha_t * phi_1) * model_prev_0
857
+ - 0.5 * (alpha_t * phi_1) * D1_0
858
+ )
859
+ elif solver_type == 'taylor':
860
+ x_t = (
861
+ (sigma_t / sigma_prev_0) * x
862
+ - (alpha_t * phi_1) * model_prev_0
863
+ + (alpha_t * (phi_1 / h + 1.)) * D1_0
864
+ )
865
+ else:
866
+ phi_1 = torch.expm1(h)
867
+ if solver_type == 'dpmsolver':
868
+ x_t = (
869
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
870
+ - (sigma_t * phi_1) * model_prev_0
871
+ - 0.5 * (sigma_t * phi_1) * D1_0
872
+ )
873
+ elif solver_type == 'taylor':
874
+ x_t = (
875
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
876
+ - (sigma_t * phi_1) * model_prev_0
877
+ - (sigma_t * (phi_1 / h - 1.)) * D1_0
878
+ )
879
+ return x_t
880
+
881
+ def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
882
+ """
883
+ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
884
+
885
+ Args:
886
+ x: A pytorch tensor. The initial value at time `s`.
887
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
888
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
889
+ t: A pytorch tensor. The ending time, with the shape (1,).
890
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
891
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
892
+ Returns:
893
+ x_t: A pytorch tensor. The approximated solution at time `t`.
894
+ """
895
+ ns = self.noise_schedule
896
+ model_prev_2, model_prev_1, model_prev_0 = model_prev_list
897
+ t_prev_2, t_prev_1, t_prev_0 = t_prev_list
898
+ lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
899
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
900
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
901
+ alpha_t = torch.exp(log_alpha_t)
902
+
903
+ h_1 = lambda_prev_1 - lambda_prev_2
904
+ h_0 = lambda_prev_0 - lambda_prev_1
905
+ h = lambda_t - lambda_prev_0
906
+ r0, r1 = h_0 / h, h_1 / h
907
+ D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)
908
+ D1_1 = (1. / r1) * (model_prev_1 - model_prev_2)
909
+ D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
910
+ D2 = (1. / (r0 + r1)) * (D1_0 - D1_1)
911
+ if self.algorithm_type == "dpmsolver++":
912
+ phi_1 = torch.expm1(-h)
913
+ phi_2 = phi_1 / h + 1.
914
+ phi_3 = phi_2 / h - 0.5
915
+ x_t = (
916
+ (sigma_t / sigma_prev_0) * x
917
+ - (alpha_t * phi_1) * model_prev_0
918
+ + (alpha_t * phi_2) * D1
919
+ - (alpha_t * phi_3) * D2
920
+ )
921
+ else:
922
+ phi_1 = torch.expm1(h)
923
+ phi_2 = phi_1 / h - 1.
924
+ phi_3 = phi_2 / h - 0.5
925
+ x_t = (
926
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
927
+ - (sigma_t * phi_1) * model_prev_0
928
+ - (sigma_t * phi_2) * D1
929
+ - (sigma_t * phi_3) * D2
930
+ )
931
+ return x_t
932
+
933
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None, r2=None):
934
+ """
935
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
936
+
937
+ Args:
938
+ x: A pytorch tensor. The initial value at time `s`.
939
+ s: A pytorch tensor. The starting time, with the shape (1,).
940
+ t: A pytorch tensor. The ending time, with the shape (1,).
941
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
942
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
943
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
944
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
945
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
946
+ r2: A `float`. The hyperparameter of the third-order solver.
947
+ Returns:
948
+ x_t: A pytorch tensor. The approximated solution at time `t`.
949
+ """
950
+ if order == 1:
951
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
952
+ elif order == 2:
953
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
954
+ elif order == 3:
955
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
956
+ else:
957
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
958
+
959
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):
960
+ """
961
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
962
+
963
+ Args:
964
+ x: A pytorch tensor. The initial value at time `s`.
965
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
966
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)
967
+ t: A pytorch tensor. The ending time, with the shape (1,).
968
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
969
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
970
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
971
+ Returns:
972
+ x_t: A pytorch tensor. The approximated solution at time `t`.
973
+ """
974
+ if order == 1:
975
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
976
+ elif order == 2:
977
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
978
+ elif order == 3:
979
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
980
+ else:
981
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
982
+
983
+ def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpmsolver'):
984
+ """
985
+ The adaptive step size solver based on singlestep DPM-Solver.
986
+
987
+ Args:
988
+ x: A pytorch tensor. The initial value at time `t_T`.
989
+ order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
990
+ t_T: A `float`. The starting time of the sampling (default is T).
991
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
992
+ h_init: A `float`. The initial step size (for logSNR).
993
+ atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
994
+ rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
995
+ theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
996
+ t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
997
+ current time and `t_0` is less than `t_err`. The default setting is 1e-5.
998
+ solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
999
+ The type slightly impacts the performance. We recommend using the 'dpmsolver' type.
1000
+ Returns:
1001
+ x_0: A pytorch tensor. The approximated solution at time `t_0`.
1002
+
1003
+ [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
1004
+ """
1005
+ ns = self.noise_schedule
1006
+ s = t_T * torch.ones((1,)).to(x)
1007
+ lambda_s = ns.marginal_lambda(s)
1008
+ lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
1009
+ h = h_init * torch.ones_like(s).to(x)
1010
+ x_prev = x
1011
+ nfe = 0
1012
+ if order == 2:
1013
+ r1 = 0.5
1014
+ lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
1015
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)
1016
+ elif order == 3:
1017
+ r1, r2 = 1. / 3., 2. / 3.
1018
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)
1019
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)
1020
+ else:
1021
+ raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
1022
+ while torch.abs((s - t_0)).mean() > t_err:
1023
+ t = ns.inverse_lambda(lambda_s + h)
1024
+ x_lower, lower_noise_kwargs = lower_update(x, s, t)
1025
+ x_higher = higher_update(x, s, t, **lower_noise_kwargs)
1026
+ delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
1027
+ norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
1028
+ E = norm_fn((x_higher - x_lower) / delta).max()
1029
+ if torch.all(E <= 1.):
1030
+ x = x_higher
1031
+ s = t
1032
+ x_prev = x_lower
1033
+ lambda_s = ns.marginal_lambda(s)
1034
+ h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
1035
+ nfe += order
1036
+ print('adaptive solver nfe', nfe)
1037
+ return x
1038
+
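The step-size control above follows the usual embedded-pair recipe: the higher-order result is accepted when the scaled error satisfies $E \le 1$, and the logSNR step is updated as

$$
h \;\leftarrow\; \min\!\bigl(\theta\, h\, E^{-1/\text{order}},\; \lambda_0 - \lambda_s\bigr),
\qquad
E = \max_i \operatorname{RMS}\!\left(\frac{x^{\text{high}}_i - x^{\text{low}}_i}{\delta_i}\right),
\qquad
\delta = \max\bigl(\text{atol},\; \text{rtol}\cdot\max(|x^{\text{low}}|, |x^{\text{prev}}|)\bigr).
$$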
1039
+ def add_noise(self, x, t, noise=None):
1040
+ """
1041
+ Compute the noised input xt = alpha_t * x + sigma_t * noise.
1042
+
1043
+ Args:
1044
+ x: A `torch.Tensor` with shape `(batch_size, *shape)`.
1045
+ t: A `torch.Tensor` with shape `(t_size,)`.
1046
+ Returns:
1047
+ xt with shape `(t_size, batch_size, *shape)`.
1048
+ """
1049
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
1050
+ if noise is None:
1051
+ noise = torch.randn((t.shape[0], *x.shape), device=x.device)
1052
+ x = x.reshape((-1, *x.shape))
1053
+ xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise
1054
+ if t.shape[0] == 1:
1055
+ return xt.squeeze(0)
1056
+ else:
1057
+ return xt
1058
+
1059
+ def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',
1060
+ method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',
1061
+ atol=0.0078, rtol=0.05, return_intermediate=False,
1062
+ ):
1063
+ """
1064
+ Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.
1065
+ For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.
1066
+ """
1067
+ t_0 = 1. / self.noise_schedule.total_N if t_start is None else t_start
1068
+ t_T = self.noise_schedule.T if t_end is None else t_end
1069
+ assert t_0 > 0 and t_T > 0, "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
1070
+ return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type,
1071
+ method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero, solver_type=solver_type,
1072
+ atol=atol, rtol=rtol, return_intermediate=return_intermediate)
1073
+
1074
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',
1075
+ method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',
1076
+ atol=0.0078, rtol=0.05, return_intermediate=False,
1077
+ ):
1078
+ """
1079
+ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
1080
+
1081
+ =====================================================
1082
+
1083
+ We support the following algorithms for both noise prediction model and data prediction model:
1084
+ - 'singlestep':
1085
+ Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
1086
+ We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
1087
+ The total number of function evaluations (NFE) == `steps`.
1088
+ Given a fixed NFE == `steps`, the sampling procedure is:
1089
+ - If `order` == 1:
1090
+ - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
1091
+ - If `order` == 2:
1092
+ - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
1093
+ - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
1094
+ - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1095
+ - If `order` == 3:
1096
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
1097
+ - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
1098
+ - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
1099
+ - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
1100
+ - 'multistep':
1101
+ Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
1102
+ We initialize the first `order` values by lower order multistep solvers.
1103
+ Given a fixed NFE == `steps`, the sampling procedure is:
1104
+ Denote K = steps.
1105
+ - If `order` == 1:
1106
+ - We use K steps of DPM-Solver-1 (i.e. DDIM).
1107
+ - If `order` == 2:
1108
+ - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
1109
+ - If `order` == 3:
1110
+ - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
1111
+ - 'singlestep_fixed':
1112
+ Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
1113
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
1114
+ - 'adaptive':
1115
+ Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
1116
+ We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
1117
+ You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
1118
+ (NFE) and the sample quality.
1119
+ - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
1120
+ - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
1121
+
1122
+ =====================================================
1123
+
1124
+ Some advice on choosing the algorithm:
1125
+ - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
1126
+ Use singlestep DPM-Solver or DPM-Solver++ ("DPM-Solver-fast" in the paper) with `order = 3`.
1127
+ e.g., DPM-Solver:
1128
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver")
1129
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1130
+ skip_type='time_uniform', method='singlestep')
1131
+ e.g., DPM-Solver++:
1132
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
1133
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
1134
+ skip_type='time_uniform', method='singlestep')
1135
+ - For **guided sampling with large guidance scale** by DPMs:
1136
+ Use multistep DPM-Solver with `algorithm_type="dpmsolver++"` and `order = 2`.
1137
+ e.g.
1138
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
1139
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
1140
+ skip_type='time_uniform', method='multistep')
1141
+
1142
+ We support three types of `skip_type`:
1143
+ - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**
1144
+ - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
1145
+ - 'time_quadratic': quadratic time for the time steps.
1146
+
1147
+ =====================================================
1148
+ Args:
1149
+ x: A pytorch tensor. The initial value at time `t_start`
1150
+ e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1151
+ steps: A `int`. The total number of function evaluations (NFE).
1152
+ t_start: A `float`. The starting time of the sampling.
1153
+ If `T` is None, we use self.noise_schedule.T (default is 1.0).
1154
+ t_end: A `float`. The ending time of the sampling.
1155
+ If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1156
+ e.g. if total_N == 1000, we have `t_end` == 1e-3.
1157
+ For discrete-time DPMs:
1158
+ - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1159
+ For continuous-time DPMs:
1160
+ - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1161
+ order: A `int`. The order of DPM-Solver.
1162
+ skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1163
+ method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1164
+ denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1165
+ Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1166
+
1167
+ This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1168
+ score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when
1169
+ sampling diffusion models by diffusion SDEs for low-resolutional images
1170
+ (such as CIFAR-10). However, we observed that this trick does not matter for
1171
+ high-resolutional images. As it needs an additional NFE, we do not recommend
1172
+ it for high-resolutional images.
1173
+ lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1174
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that
1175
+ this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1176
+ (especially for steps <= 10). So we recommend setting it to `True`.
1177
+ solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.
1178
+ atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1179
+ rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1180
+ return_intermediate: A `bool`. Whether to save the xt at each step.
1181
+ When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.
1182
+ Returns:
1183
+ x_end: A pytorch tensor. The approximated solution at time `t_end`.
1184
+
1185
+ """
1186
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1187
+ t_T = self.noise_schedule.T if t_start is None else t_start
1188
+ assert t_0 > 0 and t_T > 0, "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
1189
+ if return_intermediate:
1190
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when saving intermediate values"
1191
+ if self.correcting_xt_fn is not None:
1192
+ assert method in ['multistep', 'singlestep', 'singlestep_fixed'], "Cannot use adaptive solver when correcting_xt_fn is not None"
1193
+ device = x.device
1194
+ intermediates = []
1195
+ with torch.no_grad():
1196
+ if method == 'adaptive':
1197
+ x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
1198
+ elif method == 'multistep':
1199
+ assert steps >= order
1200
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1201
+ assert timesteps.shape[0] - 1 == steps
1202
+ # Init the initial values.
1203
+ step = 0
1204
+ t = timesteps[step]
1205
+ t_prev_list = [t]
1206
+ model_prev_list = [self.model_fn(x, t)]
1207
+ if self.correcting_xt_fn is not None:
1208
+ x = self.correcting_xt_fn(x, t, step)
1209
+ if return_intermediate:
1210
+ intermediates.append(x)
1211
+ # Init the first `order` values by lower order multistep DPM-Solver.
1212
+ for step in range(1, order):
1213
+ t = timesteps[step]
1214
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step, solver_type=solver_type)
1215
+ if self.correcting_xt_fn is not None:
1216
+ x = self.correcting_xt_fn(x, t, step)
1217
+ if return_intermediate:
1218
+ intermediates.append(x)
1219
+ t_prev_list.append(t)
1220
+ model_prev_list.append(self.model_fn(x, t))
1221
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
1222
+ for step in range(order, steps + 1):
1223
+ t = timesteps[step]
1224
+ # We only use lower order for steps < 10
1225
+ if lower_order_final and steps < 10:
1226
+ step_order = min(order, steps + 1 - step)
1227
+ else:
1228
+ step_order = order
1229
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order, solver_type=solver_type)
1230
+ if self.correcting_xt_fn is not None:
1231
+ x = self.correcting_xt_fn(x, t, step)
1232
+ if return_intermediate:
1233
+ intermediates.append(x)
1234
+ for i in range(order - 1):
1235
+ t_prev_list[i] = t_prev_list[i + 1]
1236
+ model_prev_list[i] = model_prev_list[i + 1]
1237
+ t_prev_list[-1] = t
1238
+ # We do not need to evaluate the final model value.
1239
+ if step < steps:
1240
+ model_prev_list[-1] = self.model_fn(x, t)
1241
+ elif method in ['singlestep', 'singlestep_fixed']:
1242
+ if method == 'singlestep':
1243
+ timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
1244
+ elif method == 'singlestep_fixed':
1245
+ K = steps // order
1246
+ orders = [order,] * K
1247
+ timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1248
+ for step, order in enumerate(orders):
1249
+ s, t = timesteps_outer[step], timesteps_outer[step + 1]
1250
+ timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order, device=device)
1251
+ lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1252
+ h = lambda_inner[-1] - lambda_inner[0]
1253
+ r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1254
+ r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1255
+ x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)
1256
+ if self.correcting_xt_fn is not None:
1257
+ x = self.correcting_xt_fn(x, t, step)
1258
+ if return_intermediate:
1259
+ intermediates.append(x)
1260
+ else:
1261
+ raise ValueError("Got wrong method {}".format(method))
1262
+ if denoise_to_zero:
1263
+ t = torch.ones((1,)).to(device) * t_0
1264
+ x = self.denoise_to_zero_fn(x, t)
1265
+ if self.correcting_xt_fn is not None:
1266
+ x = self.correcting_xt_fn(x, t, step + 1)
1267
+ if return_intermediate:
1268
+ intermediates.append(x)
1269
+ if return_intermediate:
1270
+ return x, intermediates
1271
+ else:
1272
+ return x
1273
+
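Putting the pieces of this file together, an unconditional sampling sketch (`my_unet` and the beta schedule below are placeholders; `NoiseScheduleVP` is defined earlier in this file):

```python
import torch
# NoiseScheduleVP, model_wrapper and DPM_Solver come from this file
# (src/dpm_solver/dpm_solver_pytorch.py).

betas = torch.linspace(1e-4, 0.02, 1000)            # placeholder DDPM betas
noise_schedule = NoiseScheduleVP(schedule="discrete", betas=betas)

# `my_unet(x, t, **kwargs)` is a placeholder discrete-time noise prediction model.
model_fn = model_wrapper(
    model=my_unet,
    noise_schedule=noise_schedule,
    model_type="noise",
    guidance_type="uncond",
)

solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
x_T = torch.randn(4, 3, 96, 96)
x_0 = solver.sample(x_T, steps=20, order=2, skip_type="time_uniform", method="multistep")
```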
1274
+
1275
+
1276
+ #############################################################
1277
+ # other utility functions
1278
+ #############################################################
1279
+
1280
+ def interpolate_fn(x, xp, yp):
1281
+ """
1282
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
1283
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
1284
+ The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
1285
+
1286
+ Args:
1287
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1288
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1289
+ yp: PyTorch tensor with shape [C, K].
1290
+ Returns:
1291
+ The function values f(x), with shape [N, C].
1292
+ """
1293
+ N, K = x.shape[0], xp.shape[1]
1294
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1295
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1296
+ x_idx = torch.argmin(x_indices, dim=2)
1297
+ cand_start_idx = x_idx - 1
1298
+ start_idx = torch.where(
1299
+ torch.eq(x_idx, 0),
1300
+ torch.tensor(1, device=x.device),
1301
+ torch.where(
1302
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1303
+ ),
1304
+ )
1305
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1306
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1307
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1308
+ start_idx2 = torch.where(
1309
+ torch.eq(x_idx, 0),
1310
+ torch.tensor(0, device=x.device),
1311
+ torch.where(
1312
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1313
+ ),
1314
+ )
1315
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1316
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1317
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1318
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1319
+ return cand
1320
+
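A quick sanity check of the keypoint interpolation (the numbers are arbitrary):

```python
import torch

x  = torch.tensor([[1.5]])        # queries, shape [N, C] = [1, 1]
xp = torch.tensor([[1.0, 2.0]])   # keypoint x-coordinates, shape [C, K]
yp = torch.tensor([[10.0, 20.0]]) # keypoint y-values, shape [C, K]
print(interpolate_fn(x, xp, yp))  # tensor([[15.]]) -- linear between the two keypoints
```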
1321
+
1322
+ def expand_dims(v, dims):
1323
+ """
1324
+ Expand the tensor `v` to the dim `dims`.
1325
+
1326
+ Args:
1327
+ `v`: a PyTorch tensor with shape [N].
1328
+ `dim`: a `int`.
1329
+ Returns:
1330
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1331
+ """
1332
+ return v[(...,) + (None,)*(dims - 1)]
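A one-line usage note: `expand_dims` appends singleton dimensions so a per-sample scalar can broadcast against image-shaped tensors.

```python
import torch

v = torch.randn(8)                                   # one value per sample
print(expand_dims(v, 4).shape)                       # torch.Size([8, 1, 1, 1])
print((expand_dims(v, 4) * torch.randn(8, 3, 96, 96)).shape)  # broadcasts to [8, 3, 96, 96]
```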
src/dpm_solver/pipeline_dpm_solver.py ADDED
@@ -0,0 +1,117 @@
1
+ import torch
2
+ from PIL import Image
3
+
4
+ from .dpm_solver_pytorch import (NoiseScheduleVP,
5
+ model_wrapper,
6
+ DPM_Solver)
7
+
8
+ class FontDiffuserDPMPipeline():
9
+ """FontDiffuser pipeline with DPM_Solver scheduler.
10
+ """
11
+
12
+ def __init__(
13
+ self,
14
+ model,
15
+ ddpm_train_scheduler,
16
+ version="V3",
17
+ model_type="noise",
18
+ guidance_type="classifier-free",
19
+ guidance_scale=7.5
20
+ ):
21
+ super().__init__()
22
+ self.model = model
23
+ self.train_scheduler_betas = ddpm_train_scheduler.betas
24
+ # Define the noise schedule
25
+ self.noise_schedule = NoiseScheduleVP(schedule='discrete', betas=self.train_scheduler_betas)
26
+
27
+ self.version = version
28
+ self.model_type = model_type
29
+ self.guidance_type = guidance_type
30
+ self.guidance_scale = guidance_scale
31
+
32
+ def numpy_to_pil(self, images):
33
+ """Convert a numpy image or a batch of images to a PIL image.
34
+ """
35
+ if images.ndim == 3:
36
+ images = images[None, ...]
37
+ images = (images * 255).round().astype("uint8")
38
+ pil_images = [Image.fromarray(image) for image in images]
39
+
40
+ return pil_images
41
+
42
+ def generate(
43
+ self,
44
+ content_images,
45
+ style_images,
46
+ batch_size,
47
+ order,
48
+ num_inference_step,
49
+ content_encoder_downsample_size,
50
+ t_start=None,
51
+ t_end=None,
52
+ dm_size=(96, 96),
53
+ algorithm_type="dpmsolver++",
54
+ skip_type="time_uniform",
55
+ method="multistep",
56
+ correcting_x0_fn=None,
57
+ generator=None,
58
+ ):
59
+ model_kwargs = {}
60
+ model_kwargs["version"] = self.version
61
+ model_kwargs["content_encoder_downsample_size"] = content_encoder_downsample_size
62
+
63
+ cond = []
64
+ cond.append(content_images)
65
+ cond.append(style_images)
66
+
67
+ uncond = []
68
+ uncond_content_images = torch.ones_like(content_images).to(self.model.device)
69
+ uncond_style_images = torch.ones_like(style_images).to(self.model.device)
70
+ uncond.append(uncond_content_images)
71
+ uncond.append(uncond_style_images)
72
+
73
+ # 2. Convert the discrete-time model to the continuous-time noise prediction model
74
+ model_fn = model_wrapper(
75
+ model=self.model,
76
+ noise_schedule=self.noise_schedule,
77
+ model_type=self.model_type,
78
+ model_kwargs=model_kwargs,
79
+ guidance_type=self.guidance_type,
80
+ condition=cond,
81
+ unconditional_condition=uncond,
82
+ guidance_scale=self.guidance_scale
83
+ )
84
+
85
+ # 3. Define dpm-solver and sample by multistep DPM-Solver.
86
+ # (We recommend multistep DPM-Solver for conditional sampling)
87
+ # You can adjust the `steps` to balance the computation costs and the sample quality.
88
+ dpm_solver = DPM_Solver(
89
+ model_fn=model_fn,
90
+ noise_schedule=self.noise_schedule,
91
+ algorithm_type=algorithm_type,
92
+ correcting_x0_fn=correcting_x0_fn
93
+ )
94
+ # If the DPM is defined on pixel-space images, you can further set `correcting_x0_fn="dynamic_thresholding"`.
95
+
96
+ # 4. Generate
97
+ # Sample gaussian noise to begin loop => [batch, 3, height, width]
98
+ x_T = torch.randn(
99
+ (batch_size, 3, dm_size[0], dm_size[1]),
100
+ generator=generator,
101
+ )
102
+ x_T = x_T.to(self.model.device)
103
+
104
+ x_sample = dpm_solver.sample(
105
+ x=x_T,
106
+ steps=num_inference_step,
107
+ order=order,
108
+ skip_type=skip_type,
109
+ method=method,
110
+ )
111
+
112
+ x_sample = (x_sample / 2 + 0.5).clamp(0, 1)
113
+ x_sample = x_sample.cpu().permute(0, 2, 3, 1).numpy()
114
+
115
+ x_images = self.numpy_to_pil(x_sample)
116
+
117
+ return x_images
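A hedged usage sketch of the pipeline (only the constructor and `generate` signatures above are taken from the code; `model`, `train_scheduler`, the input tensors and `content_encoder_downsample_size=3` are placeholders assumed to be built elsewhere in this repo, e.g. by sample.py / src/build.py):

```python
import torch
from src.dpm_solver.pipeline_dpm_solver import FontDiffuserDPMPipeline

# Assumption: `model` is a FontDiffuserModelDPM instance and `train_scheduler` is the
# DDPM training scheduler exposing `.betas`; both are constructed elsewhere in the repo.
pipeline = FontDiffuserDPMPipeline(
    model=model,
    ddpm_train_scheduler=train_scheduler,
    version="V3",
    guidance_type="classifier-free",
    guidance_scale=7.5,
)

images = pipeline.generate(
    content_images=content_images,       # [B, 3, 96, 96] tensors on model.device
    style_images=style_images,
    batch_size=content_images.shape[0],
    order=2,
    num_inference_step=20,
    content_encoder_downsample_size=3,   # assumption: must match the training config
)
images[0].save("out.png")
```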
src/model.py ADDED
@@ -0,0 +1,110 @@
1
+ import math
2
+ import torch
3
+ import torch.nn as nn
4
+
5
+ from diffusers import ModelMixin
6
+ from diffusers.configuration_utils import (ConfigMixin,
7
+ register_to_config)
8
+
9
+ class FontDiffuserModel(ModelMixin, ConfigMixin):
10
+ """Forward function for FontDiffuer with content encoder \
11
+ style encoder and unet.
12
+ """
13
+
14
+ @register_to_config
15
+ def __init__(
16
+ self,
17
+ unet,
18
+ style_encoder,
19
+ content_encoder,
20
+ ):
21
+ super().__init__()
22
+ self.unet = unet
23
+ self.style_encoder = style_encoder
24
+ self.content_encoder = content_encoder
25
+
26
+ def forward(
27
+ self,
28
+ x_t,
29
+ timesteps,
30
+ style_images,
31
+ content_images,
32
+ content_encoder_downsample_size,
33
+ ):
34
+ style_img_feature, _, _ = self.style_encoder(style_images)
35
+
36
+ batch_size, channel, height, width = style_img_feature.shape
37
+ style_hidden_states = style_img_feature.permute(0, 2, 3, 1).reshape(batch_size, height*width, channel)
38
+
39
+ # Get the content feature
40
+ content_img_feature, content_residual_features = self.content_encoder(content_images)
41
+ content_residual_features.append(content_img_feature)
42
+ # Get the content feature from reference image
43
+ style_content_feature, style_content_res_features = self.content_encoder(style_images)
44
+ style_content_res_features.append(style_content_feature)
45
+
46
+ input_hidden_states = [style_img_feature, content_residual_features, \
47
+ style_hidden_states, style_content_res_features]
48
+
49
+ out = self.unet(
50
+ x_t,
51
+ timesteps,
52
+ encoder_hidden_states=input_hidden_states,
53
+ content_encoder_downsample_size=content_encoder_downsample_size,
54
+ )
55
+ noise_pred = out[0]
56
+ offset_out_sum = out[1]
57
+
58
+ return noise_pred, offset_out_sum
59
+
60
+
61
+ class FontDiffuserModelDPM(ModelMixin, ConfigMixin):
62
+ """DPM Forward function for FontDiffuer with content encoder \
63
+ style encoder and unet.
64
+ """
65
+ @register_to_config
66
+ def __init__(
67
+ self,
68
+ unet,
69
+ style_encoder,
70
+ content_encoder,
71
+ ):
72
+ super().__init__()
73
+ self.unet = unet
74
+ self.style_encoder = style_encoder
75
+ self.content_encoder = content_encoder
76
+
77
+ def forward(
78
+ self,
79
+ x_t,
80
+ timesteps,
81
+ cond,
82
+ content_encoder_downsample_size,
83
+ version,
84
+ ):
85
+ content_images = cond[0]
86
+ style_images = cond[1]
87
+
88
+ style_img_feature, _, style_residual_features = self.style_encoder(style_images)
89
+
90
+ batch_size, channel, height, width = style_img_feature.shape
91
+ style_hidden_states = style_img_feature.permute(0, 2, 3, 1).reshape(batch_size, height*width, channel)
92
+
93
+ # Get content feature
94
+ content_img_feature, content_residual_features = self.content_encoder(content_images)
95
+ content_residual_features.append(content_img_feature)
96
+ # Get the content feature from reference image
97
+ style_content_feature, style_content_res_features = self.content_encoder(style_images)
98
+ style_content_res_features.append(style_content_feature)
99
+
100
+ input_hidden_states = [style_img_feature, content_residual_features, style_hidden_states, style_content_res_features]
101
+
102
+ out = self.unet(
103
+ x_t,
104
+ timesteps,
105
+ encoder_hidden_states=input_hidden_states,
106
+ content_encoder_downsample_size=content_encoder_downsample_size,
107
+ )
108
+ noise_pred = out[0]
109
+
110
+ return noise_pred
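`FontDiffuserModelDPM.forward(x_t, timesteps, cond, ...)` is shaped to match the call made inside `model_wrapper` (`model(x, t_input, cond, **model_kwargs)`), with `content_encoder_downsample_size` and `version` passed through `model_kwargs`. A minimal sketch of that call pattern with a stand-in module (everything below is a placeholder, not the real encoders):

```python
import torch

class _ToyDPMModel(torch.nn.Module):
    """Stand-in with the same call signature as FontDiffuserModelDPM."""
    def forward(self, x_t, timesteps, cond, content_encoder_downsample_size, version):
        content_images, style_images = cond            # same packing as in the pipeline
        return torch.zeros_like(x_t)                   # pretend noise prediction

model = _ToyDPMModel()
x_t = torch.randn(2, 3, 96, 96)
t = torch.full((2,), 500.0)
cond = [torch.randn(2, 3, 96, 96), torch.randn(2, 3, 96, 96)]  # [content, style]
eps = model(x_t, t, cond, content_encoder_downsample_size=3, version="V3")
print(eps.shape)  # torch.Size([2, 3, 96, 96])
```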
src/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .content_encoder import ContentEncoder
2
+ from .style_encoder import StyleEncoder
3
+ from .unet import UNet
src/modules/attention.py ADDED
@@ -0,0 +1,414 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from torch import nn
5
+ import torch.nn.functional as F
6
+
7
+
8
+ class SpatialTransformer(nn.Module):
9
+ """
10
+ Transformer block for image-like data. First, project the input (aka embedding) and reshape to b, t, d. Then apply
11
+ standard transformer action. Finally, reshape to image.
12
+
13
+ Parameters:
14
+ in_channels (:obj:`int`): The number of channels in the input and output.
15
+ n_heads (:obj:`int`): The number of heads to use for multi-head attention.
16
+ d_head (:obj:`int`): The number of channels in each head.
17
+ depth (:obj:`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
18
+ dropout (:obj:`float`, *optional*, defaults to 0.1): The dropout probability to use.
19
+ context_dim (:obj:`int`, *optional*): The number of context dimensions to use.
20
+ """
21
+
22
+ def __init__(
23
+ self,
24
+ in_channels: int,
25
+ n_heads: int,
26
+ d_head: int,
27
+ depth: int = 1,
28
+ dropout: float = 0.0,
29
+ num_groups: int = 32,
30
+ context_dim: Optional[int] = None,
31
+ ):
32
+ super().__init__()
33
+ self.n_heads = n_heads
34
+ self.d_head = d_head
35
+ self.in_channels = in_channels
36
+ inner_dim = n_heads * d_head
37
+ self.norm = torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
38
+
39
+ self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
40
+
41
+ self.transformer_blocks = nn.ModuleList(
42
+ [
43
+ BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
44
+ for d in range(depth)
45
+ ]
46
+ )
47
+
48
+ self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
49
+
50
+ def _set_attention_slice(self, slice_size):
51
+ for block in self.transformer_blocks:
52
+ block._set_attention_slice(slice_size)
53
+
54
+ def forward(self, hidden_states, context=None):
55
+ # note: if no context is given, cross-attention defaults to self-attention
56
+ batch, channel, height, width = hidden_states.shape
57
+ residual = hidden_states
58
+ hidden_states = self.norm(hidden_states)
59
+ hidden_states = self.proj_in(hidden_states)
60
+ inner_dim = hidden_states.shape[1]
61
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) # flatten spatial dims, e.g. torch.Size([1, 4096, 128])
62
+ for block in self.transformer_blocks:
63
+ hidden_states = block(hidden_states, context=context) # hidden_states: torch.Size([1, 4096, 128])
64
+ hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2) # torch.Size([1, 128, 64, 64])
65
+ hidden_states = self.proj_out(hidden_states)
66
+ return hidden_states + residual
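A small shape check of the block above (all sizes are illustrative; in this repository the context would typically be a flattened style feature map):

    import torch

    st = SpatialTransformer(in_channels=128, n_heads=4, d_head=32, depth=1, context_dim=256)
    feat = torch.randn(1, 128, 64, 64)    # image-like feature map
    context = torch.randn(1, 16, 256)     # e.g. a flattened 4x4 style feature map
    out = st(feat, context=context)
    assert out.shape == feat.shape        # the residual connection preserves the shape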
67
+
68
+
69
+ class BasicTransformerBlock(nn.Module):
70
+ r"""
71
+ A basic Transformer block.
72
+
73
+ Parameters:
74
+ dim (:obj:`int`): The number of channels in the input and output.
75
+ n_heads (:obj:`int`): The number of heads to use for multi-head attention.
76
+ d_head (:obj:`int`): The number of channels in each head.
77
+ dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
78
+ context_dim (:obj:`int`, *optional*): The size of the context vector for cross attention.
79
+ gated_ff (:obj:`bool`, *optional*, defaults to :obj:`False`): Whether to use a gated feed-forward network.
80
+ checkpoint (:obj:`bool`, *optional*, defaults to :obj:`False`): Whether to use checkpointing.
81
+ """
82
+
83
+ def __init__(
84
+ self,
85
+ dim: int,
86
+ n_heads: int,
87
+ d_head: int,
88
+ dropout=0.0,
89
+ context_dim: Optional[int] = None,
90
+ gated_ff: bool = True,
91
+ checkpoint: bool = True,
92
+ ):
93
+ super().__init__()
94
+ self.attn1 = CrossAttention(
95
+ query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout
96
+ ) # is a self-attention
97
+ self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
98
+ self.attn2 = CrossAttention(
99
+ query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout
100
+ ) # is self-attn if context is none
101
+ self.norm1 = nn.LayerNorm(dim)
102
+ self.norm2 = nn.LayerNorm(dim)
103
+ self.norm3 = nn.LayerNorm(dim)
104
+ self.checkpoint = checkpoint
105
+
106
+ def _set_attention_slice(self, slice_size):
107
+ self.attn1._slice_size = slice_size
108
+ self.attn2._slice_size = slice_size
109
+
110
+ def forward(self, hidden_states, context=None):
111
+ hidden_states = hidden_states.contiguous() if hidden_states.device.type == "mps" else hidden_states
112
+ hidden_states = self.attn1(self.norm1(hidden_states)) + hidden_states # hidden_states: torch.Size([1, 4096, 128])
113
+ hidden_states = self.attn2(self.norm2(hidden_states), context=context) + hidden_states
114
+ hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states
115
+ return hidden_states
116
+
117
+
118
+ class FeedForward(nn.Module):
119
+ r"""
120
+ A feed-forward layer.
121
+
122
+ Parameters:
123
+ dim (:obj:`int`): The number of channels in the input.
124
+ dim_out (:obj:`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
125
+ mult (:obj:`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
126
+ glu (:obj:`bool`, *optional*, defaults to :obj:`False`): Whether to use GLU activation.
127
+ dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
128
+ """
129
+
130
+ def __init__(
131
+ self, dim: int, dim_out: Optional[int] = None, mult: int = 4, glu: bool = False, dropout: float = 0.0
132
+ ):
133
+ super().__init__()
134
+ inner_dim = int(dim * mult)
135
+ dim_out = dim_out if dim_out is not None else dim
136
+ project_in = GEGLU(dim, inner_dim)
137
+
138
+ self.net = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out))
139
+
140
+ def forward(self, hidden_states):
141
+ return self.net(hidden_states)
142
+
143
+
144
+ class GEGLU(nn.Module):
145
+ r"""
146
+ A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
147
+
148
+ Parameters:
149
+ dim_in (:obj:`int`): The number of channels in the input.
150
+ dim_out (:obj:`int`): The number of channels in the output.
151
+ """
152
+
153
+ def __init__(self, dim_in: int, dim_out: int):
154
+ super().__init__()
155
+ self.proj = nn.Linear(dim_in, dim_out * 2)
156
+
157
+ def forward(self, hidden_states):
158
+ hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
159
+ return hidden_states * F.gelu(gate)
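The projection doubles the output width and the second half acts as a GELU gate; a standalone check with illustrative dimensions:

    import torch
    from torch import nn
    import torch.nn.functional as F

    proj = nn.Linear(128, 256 * 2)          # dim_in=128, dim_out=256, doubled for the gate
    x = torch.randn(1, 4096, 128)
    h, gate = proj(x).chunk(2, dim=-1)      # two (1, 4096, 256) halves
    out = h * F.gelu(gate)                  # GEGLU: value half modulated by the gated half
    assert out.shape == (1, 4096, 256)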
160
+
161
+
162
+ class CrossAttention(nn.Module):
163
+ r"""
164
+ A cross attention layer.
165
+
166
+ Parameters:
167
+ query_dim (:obj:`int`): The number of channels in the query.
168
+ context_dim (:obj:`int`, *optional*):
169
+ The number of channels in the context. If not given, defaults to `query_dim`.
170
+ heads (:obj:`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
171
+ dim_head (:obj:`int`, *optional*, defaults to 64): The number of channels in each head.
172
+ dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
173
+ """
174
+
175
+ def __init__(
176
+ self, query_dim: int, context_dim: Optional[int] = None, heads: int = 8, dim_head: int = 64, dropout: float = 0.0
177
+ ):
178
+ super().__init__()
179
+ inner_dim = dim_head * heads
180
+ context_dim = context_dim if context_dim is not None else query_dim
181
+
182
+ self.scale = dim_head**-0.5
183
+ self.heads = heads
184
+ # for slice_size > 0 the attention score computation
185
+ # is split across the batch axis to save memory
186
+ # You can set slice_size with `set_attention_slice`
187
+ self._slice_size = None
188
+
189
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
190
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
191
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
192
+
193
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
194
+
195
+ def reshape_heads_to_batch_dim(self, tensor):
196
+ batch_size, seq_len, dim = tensor.shape
197
+ head_size = self.heads
198
+ tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
199
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
200
+ return tensor
201
+
202
+ def reshape_batch_dim_to_heads(self, tensor):
203
+ batch_size, seq_len, dim = tensor.shape
204
+ head_size = self.heads
205
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
206
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
207
+ return tensor
208
+
209
+ def forward(self, hidden_states, context=None, mask=None):
210
+ batch_size, sequence_length, _ = hidden_states.shape
211
+
212
+ query = self.to_q(hidden_states)
213
+ context = context if context is not None else hidden_states
214
+ key = self.to_k(context)
215
+ value = self.to_v(context)
216
+
217
+ dim = query.shape[-1]
218
+
219
+ query = self.reshape_heads_to_batch_dim(query)
220
+ key = self.reshape_heads_to_batch_dim(key)
221
+ value = self.reshape_heads_to_batch_dim(value)
222
+
223
+ # TODO(PVP) - mask is currently never used. Remember to re-implement when used
224
+
225
+ # attention, what we cannot get enough of
226
+
227
+ if self._slice_size is None or query.shape[0] // self._slice_size == 1:
228
+ hidden_states = self._attention(query, key, value)
229
+ else:
230
+ hidden_states = self._sliced_attention(query, key, value, sequence_length, dim)
231
+
232
+ return self.to_out(hidden_states)
233
+
234
+ def _attention(self, query, key, value):
235
+ # TODO: use baddbmm for better performance
236
+ attention_scores = torch.matmul(query, key.transpose(-1, -2)) * self.scale
237
+ attention_probs = attention_scores.softmax(dim=-1)
238
+ # compute attention output
239
+ hidden_states = torch.matmul(attention_probs, value)
240
+ # reshape hidden_states
241
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
242
+ return hidden_states
243
+
244
+ def _sliced_attention(self, query, key, value, sequence_length, dim):
245
+ batch_size_attention = query.shape[0]
246
+ hidden_states = torch.zeros(
247
+ (batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
248
+ )
249
+ slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
250
+ for i in range(hidden_states.shape[0] // slice_size):
251
+ start_idx = i * slice_size
252
+ end_idx = (i + 1) * slice_size
253
+ attn_slice = (
254
+ torch.matmul(query[start_idx:end_idx], key[start_idx:end_idx].transpose(1, 2)) * self.scale
255
+ ) # TODO: use baddbmm for better performance
256
+ attn_slice = attn_slice.softmax(dim=-1)
257
+ attn_slice = torch.matmul(attn_slice, value[start_idx:end_idx])
258
+
259
+ hidden_states[start_idx:end_idx] = attn_slice
260
+
261
+ # reshape hidden_states
262
+ hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
263
+ return hidden_states
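Shape-wise the layer keeps the query's (batch, tokens, query_dim) shape while attending over the context tokens; a quick check with illustrative sizes:

    import torch

    attn = CrossAttention(query_dim=128, context_dim=256, heads=4, dim_head=32)
    hidden = torch.randn(2, 4096, 128)    # e.g. a flattened 64x64 feature map
    context = torch.randn(2, 16, 256)     # e.g. a flattened 4x4 style feature map
    out = attn(hidden, context=context)
    assert out.shape == (2, 4096, 128)    # projected back to query_dim by to_out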
264
+
265
+
266
+ class OffsetRefStrucInter(nn.Module):
267
+
268
+ def __init__(
269
+ self,
270
+ res_in_channels: int,
271
+ style_feat_in_channels: int,
272
+ n_heads: int,
273
+ num_groups: int = 32,
274
+ dropout: float = 0.0,
275
+ gated_ff: bool = True,
276
+ ):
277
+ super().__init__()
278
+ # style feature projector
279
+ self.style_proj_in = nn.Conv2d(style_feat_in_channels, style_feat_in_channels, kernel_size=1, stride=1, padding=0)
280
+ self.gnorm_s = torch.nn.GroupNorm(num_groups=num_groups, num_channels=style_feat_in_channels, eps=1e-6, affine=True)
281
+ self.ln_s = nn.LayerNorm(style_feat_in_channels)
282
+
283
+ # content feature projector
284
+ self.content_proj_in = nn.Conv2d(res_in_channels, res_in_channels, kernel_size=1, stride=1, padding=0)
285
+ self.gnorm_c = torch.nn.GroupNorm(num_groups=num_groups, num_channels=res_in_channels, eps=1e-6, affine=True)
286
+ self.ln_c = nn.LayerNorm(res_in_channels)
287
+
288
+ # cross-attention
289
+ # dim_head is the intermediate attention width; the output is projected back to query_dim by a Linear layer
290
+ self.cross_attention = CrossAttention(
291
+ query_dim=style_feat_in_channels, context_dim=res_in_channels, heads=n_heads, dim_head=res_in_channels, dropout=dropout
292
+ )
293
+
294
+ # FFN
295
+ self.ff = FeedForward(style_feat_in_channels, dropout=dropout, glu=gated_ff)
296
+ self.ln_ff = nn.LayerNorm(style_feat_in_channels)
297
+
298
+ self.gnorm_out = torch.nn.GroupNorm(num_groups=num_groups, num_channels=style_feat_in_channels, eps=1e-6, affine=True)
299
+ self.proj_out = nn.Conv2d(style_feat_in_channels, 1*2*3*3, kernel_size=1, stride=1, padding=0)
300
+
301
+ def forward(self, res_hidden_states, style_content_hidden_states):
302
+ batch, c_channel, height, width = res_hidden_states.shape
303
+ _, s_channel, _, _ = style_content_hidden_states.shape
304
+ # style projector
305
+ style_content_hidden_states = self.gnorm_s(style_content_hidden_states)
306
+ style_content_hidden_states = self.style_proj_in(style_content_hidden_states)
307
+
308
+ style_content_hidden_states = style_content_hidden_states.permute(0, 2, 3, 1).reshape(batch, height*width, s_channel)
309
+ style_content_hidden_states = self.ln_s(style_content_hidden_states)
310
+
311
+ # content projector
312
+ res_hidden_states = self.gnorm_c(res_hidden_states)
313
+ res_hidden_states = self.content_proj_in(res_hidden_states)
314
+
315
+ res_hidden_states = res_hidden_states.permute(0, 2, 3, 1).reshape(batch, height*width, c_channel)
316
+ res_hidden_states = self.ln_c(res_hidden_states)
317
+
318
+ # style and content cross-attention
319
+ hidden_states = self.cross_attention(style_content_hidden_states, context=res_hidden_states)
320
+
321
+ # ffn
322
+ hidden_states = self.ff(self.ln_ff(hidden_states)) + hidden_states
323
+
324
+ # reshape
325
+ _, _, c = hidden_states.shape
326
+ reshape_out = hidden_states.permute(0, 2, 1).reshape(batch, c, height, width)
327
+
328
+ # project out
329
+ reshape_out = self.gnorm_out(reshape_out)
330
+ offset_out = self.proj_out(reshape_out)
331
+
332
+ return offset_out
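The 1*2*3*3 = 18 output channels look like per-pixel (x, y) offsets for the nine sampling points of a 3x3 kernel, i.e. the offset map a deformable convolution elsewhere in the U-Net would consume; that consumer is not in this file, so treat this as an inference. A shape check with illustrative channel counts:

    import torch

    block = OffsetRefStrucInter(res_in_channels=64, style_feat_in_channels=64, n_heads=4)
    res_feat = torch.randn(1, 64, 32, 32)             # content residual feature
    style_content_feat = torch.randn(1, 64, 32, 32)   # content feature of the reference image
    offsets = block(res_feat, style_content_feat)
    assert offsets.shape == (1, 18, 32, 32)           # 2 * 3 * 3 offset channels per location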
333
+
334
+
335
+ class SELayer(nn.Module):
336
+ def __init__(self, channel, reduction=16):
337
+ super().__init__()
338
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
339
+ self.fc = nn.Sequential(
340
+ nn.Linear(channel, channel // reduction, bias=False),
341
+ # nn.ReLU(inplace=True),
342
+ nn.SiLU(),
343
+ nn.Linear(channel // reduction, channel, bias=False),
344
+ nn.Sigmoid()
345
+ )
346
+
347
+ def forward(self, x):
348
+ b, c, _, _ = x.size()
349
+ y = self.avg_pool(x).view(b, c)
350
+ y = self.fc(y).view(b, c, 1, 1)
351
+ return x * y.expand_as(x)
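Standard squeeze-and-excitation: global average pooling to (b, c), a two-layer bottleneck MLP with a sigmoid, then per-channel rescaling of the input. For instance:

    import torch

    se = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    assert se(x).shape == x.shape    # channels are re-weighted, spatial size unchanged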
352
+
353
+
354
+ class Mish(torch.nn.Module):
355
+ def forward(self, hidden_states):
356
+ return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
357
+
358
+
359
+ class ChannelAttnBlock(nn.Module):
360
+ """This is the Channel Attention in MCA.
361
+ """
362
+ def __init__(
363
+ self,
364
+ in_channels,
365
+ out_channels,
366
+ groups=32,
367
+ groups_out=None,
368
+ eps=1e-6,
369
+ non_linearity="swish",
370
+ channel_attn=False,
371
+ reduction=32):
372
+ super().__init__()
373
+
374
+ if groups_out is None:
375
+ groups_out = groups
376
+
377
+ self.norm1 = nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
378
+ self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1)
379
+
380
+ if non_linearity == "swish":
381
+ self.nonlinearity = lambda x: F.silu(x)
382
+ elif non_linearity == "mish":
383
+ self.nonlinearity = Mish()
384
+ elif non_linearity == "silu":
385
+ self.nonlinearity = nn.SiLU()
386
+
387
+ self.channel_attn = channel_attn
388
+ if self.channel_attn:
389
+ # SE Attention
390
+ self.se_channel_attn = SELayer(channel=in_channels, reduction=reduction)
391
+
392
+ # Down channel: Use the conv1*1 to down the channel wise
393
+ self.norm3 = nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
394
+ self.down_channel = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1) # conv1*1
395
+
396
+ def forward(self, input, content_feature):
397
+
398
+ concat_feature = torch.cat([input, content_feature], dim=1)
399
+ hidden_states = concat_feature
400
+
401
+ hidden_states = self.norm1(hidden_states)
402
+ hidden_states = self.nonlinearity(hidden_states)
403
+ hidden_states = self.conv1(hidden_states)
404
+
405
+ if self.channel_attn:
406
+ hidden_states = self.se_channel_attn(hidden_states)
407
+ hidden_states = hidden_states + concat_feature
408
+
409
+ # Down channel
410
+ hidden_states = self.norm3(hidden_states)
411
+ hidden_states = self.nonlinearity(hidden_states)
412
+ hidden_states = self.down_channel(hidden_states)
413
+
414
+ return hidden_states
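Note that in_channels of ChannelAttnBlock must equal the channel count of torch.cat([input, content_feature], dim=1); a quick check under that assumption, with illustrative sizes:

    import torch

    block = ChannelAttnBlock(in_channels=128, out_channels=64, channel_attn=True)
    x = torch.randn(1, 64, 32, 32)
    content = torch.randn(1, 64, 32, 32)
    out = block(x, content)                 # concat -> 1x1 conv -> SE -> 1x1 down-channel
    assert out.shape == (1, 64, 32, 32)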
src/modules/content_encoder.py ADDED
@@ -0,0 +1,435 @@
1
+ import functools
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.nn import init
7
+ from torch.nn import Parameter as P
8
+
9
+ from diffusers import ModelMixin
10
+ from diffusers.configuration_utils import (ConfigMixin,
11
+ register_to_config)
12
+
13
+
14
+ def proj(x, y):
15
+ return torch.mm(y, x.t()) * y / torch.mm(y, y.t())
16
+
17
+
18
+ def gram_schmidt(x, ys):
19
+ for y in ys:
20
+ x = x - proj(x, y)
21
+ return x
22
+
23
+
24
+ def power_iteration(W, u_, update=True, eps=1e-12):
25
+ us, vs, svs = [], [], []
26
+ for i, u in enumerate(u_):
27
+ with torch.no_grad():
28
+ v = torch.matmul(u, W)
29
+ v = F.normalize(gram_schmidt(v, vs), eps=eps)
30
+ vs += [v]
31
+ u = torch.matmul(v, W.t())
32
+ u = F.normalize(gram_schmidt(u, us), eps=eps)
33
+ us += [u]
34
+ if update:
35
+ u_[i][:] = u
36
+ svs += [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]
37
+ return svs, us, vs
38
+
39
+
40
+ class LinearBlock(nn.Module):
41
+ def __init__(
42
+ self,
43
+ in_dim,
44
+ out_dim,
45
+ norm='none',
46
+ act='relu',
47
+ use_sn=False
48
+ ):
49
+ super(LinearBlock, self).__init__()
50
+ use_bias = True
51
+ self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
52
+ if use_sn:
53
+ self.fc = nn.utils.spectral_norm(self.fc)
54
+
55
+ # initialize normalization
56
+ norm_dim = out_dim
57
+ if norm == 'bn':
58
+ self.norm = nn.BatchNorm1d(norm_dim)
59
+ elif norm == 'in':
60
+ self.norm = nn.InstanceNorm1d(norm_dim)
61
+ elif norm == 'none':
62
+ self.norm = None
63
+ else:
64
+ assert 0, "Unsupported normalization: {}".format(norm)
65
+
66
+ # initialize activation
67
+ if act == 'relu':
68
+ self.activation = nn.ReLU(inplace=True)
69
+ elif act == 'lrelu':
70
+ self.activation = nn.LeakyReLU(0.2, inplace=True)
71
+ elif act == 'tanh':
72
+ self.activation = nn.Tanh()
73
+ elif act == 'none':
74
+ self.activation = None
75
+ else:
76
+ assert 0, "Unsupported activation: {}".format(act)
77
+
78
+ def forward(self, x):
79
+ out = self.fc(x)
80
+ if self.norm:
81
+ out = self.norm(out)
82
+ if self.activation:
83
+ out = self.activation(out)
84
+ return out
85
+
86
+
87
+ class MLP(nn.Module):
88
+ def __init__(
89
+ self,
90
+ nf_in,
91
+ nf_out,
92
+ nf_mlp,
93
+ num_blocks,
94
+ norm,
95
+ act,
96
+ use_sn =False
97
+ ):
98
+ super(MLP,self).__init__()
99
+ self.model = nn.ModuleList()
100
+ nf = nf_mlp
101
+ self.model.append(LinearBlock(nf_in, nf, norm = norm, act = act, use_sn = use_sn))
102
+ for _ in range((num_blocks - 2)):
103
+ self.model.append(LinearBlock(nf, nf, norm=norm, act=act, use_sn=use_sn))
104
+ self.model.append(LinearBlock(nf, nf_out, norm='none', act ='none', use_sn = use_sn))
105
+ self.model = nn.Sequential(*self.model)
106
+
107
+ def forward(self, x):
108
+ return self.model(x.view(x.size(0), -1))
109
+
110
+
111
+ class SN(object):
112
+ def __init__(
113
+ self,
114
+ num_svs,
115
+ num_itrs,
116
+ num_outputs,
117
+ transpose=False,
118
+ eps=1e-12
119
+ ):
120
+ self.num_itrs = num_itrs
121
+ self.num_svs = num_svs
122
+ self.transpose = transpose
123
+ self.eps = eps
124
+ for i in range(self.num_svs):
125
+ self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
126
+ self.register_buffer('sv%d' % i, torch.ones(1))
127
+
128
+ @property
129
+ def u(self):
130
+ return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
131
+
132
+ @property
133
+ def sv(self):
134
+ return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
135
+
136
+ def W_(self):
137
+ W_mat = self.weight.view(self.weight.size(0), -1)
138
+ if self.transpose:
139
+ W_mat = W_mat.t()
140
+ for _ in range(self.num_itrs):
141
+ svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
142
+ if self.training:
143
+ with torch.no_grad():
144
+ for i, sv in enumerate(svs):
145
+ self.sv[i][:] = sv
146
+ return self.weight / svs[0]
147
+
148
+ class SNConv2d(nn.Conv2d, SN):
149
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
150
+ padding=0, dilation=1, groups=1, bias=True,
151
+ num_svs=1, num_itrs=1, eps=1e-12):
152
+ nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
153
+ padding, dilation, groups, bias)
154
+ SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
155
+
156
+ def forward(self, x):
157
+ return F.conv2d(x, self.W_(), self.bias, self.stride,
158
+ self.padding, self.dilation, self.groups)
159
+
160
+ def forward_wo_sn(self, x):
161
+ return F.conv2d(x, self.weight, self.bias, self.stride,
162
+ self.padding, self.dilation, self.groups)
163
+
164
+
165
+ class SNLinear(nn.Linear, SN):
166
+ def __init__(self, in_features, out_features, bias=True,
167
+ num_svs=1, num_itrs=1, eps=1e-12):
168
+ nn.Linear.__init__(self, in_features, out_features, bias)
169
+ SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
170
+
171
+ def forward(self, x):
172
+ return F.linear(x, self.W_(), self.bias)
173
+
174
+
175
+ class Attention(nn.Module):
176
+ def __init__(
177
+ self,
178
+ ch,
179
+ which_conv=SNConv2d,
180
+ name='attention'
181
+ ):
182
+ super(Attention, self).__init__()
183
+ self.ch = ch
184
+ self.which_conv = which_conv
185
+ self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
186
+ self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
187
+ self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)
188
+ self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)
189
+ # Learnable gain parameter
190
+ self.gamma = P(torch.tensor(0.), requires_grad=True)
191
+
192
+ def forward(self, x, y=None):
193
+ theta = self.theta(x)
194
+ phi = F.max_pool2d(self.phi(x), [2,2])
195
+ g = F.max_pool2d(self.g(x), [2,2])
196
+
197
+ theta = theta.view(-1, self. ch // 8, x.shape[2] * x.shape[3])
198
+ phi = phi.view(-1, self. ch // 8, x.shape[2] * x.shape[3] // 4)
199
+ g = g.view(-1, self. ch // 2, x.shape[2] * x.shape[3] // 4)
200
+
201
+ beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
202
+
203
+ o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))
204
+ return self.gamma * o + x
205
+
206
+
207
+ class DBlock(nn.Module):
208
+ def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,
209
+ preactivation=False, activation=None, downsample=None,):
210
+ super(DBlock, self).__init__()
211
+
212
+ self.in_channels, self.out_channels = in_channels, out_channels
213
+
214
+ self.hidden_channels = self.out_channels if wide else self.in_channels
215
+ self.which_conv = which_conv
216
+ self.preactivation = preactivation
217
+ self.activation = activation
218
+ self.downsample = downsample
219
+
220
+ # Conv layers
221
+ self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
222
+ self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
223
+ self.learnable_sc = True if (in_channels != out_channels) or downsample else False
224
+ if self.learnable_sc:
225
+ self.conv_sc = self.which_conv(in_channels, out_channels,
226
+ kernel_size=1, padding=0)
227
+ def shortcut(self, x):
228
+ if self.preactivation:
229
+ if self.learnable_sc:
230
+ x = self.conv_sc(x)
231
+ if self.downsample:
232
+ x = self.downsample(x)
233
+ else:
234
+ if self.downsample:
235
+ x = self.downsample(x)
236
+ if self.learnable_sc:
237
+ x = self.conv_sc(x)
238
+ return x
239
+
240
+ def forward(self, x):
241
+ if self.preactivation:
242
+ h = F.relu(x)
243
+ else:
244
+ h = x
245
+ h = self.conv1(h)
246
+ h = self.conv2(self.activation(h))
247
+ if self.downsample:
248
+ h = self.downsample(h)
249
+
250
+ return h + self.shortcut(x)
251
+
252
+
253
+ class GBlock(nn.Module):
254
+ def __init__(self, in_channels, out_channels,
255
+ which_conv=nn.Conv2d,which_bn= nn.BatchNorm2d, activation=None,
256
+ upsample=None):
257
+ super(GBlock, self).__init__()
258
+
259
+ self.in_channels, self.out_channels = in_channels, out_channels
260
+ self.which_conv,self.which_bn =which_conv, which_bn
261
+ self.activation = activation
262
+ self.upsample = upsample
263
+ # Conv layers
264
+ self.conv1 = self.which_conv(self.in_channels, self.out_channels)
265
+ self.conv2 = self.which_conv(self.out_channels, self.out_channels)
266
+ self.learnable_sc = in_channels != out_channels or upsample
267
+ if self.learnable_sc:
268
+ self.conv_sc = self.which_conv(in_channels, out_channels,
269
+ kernel_size=1, padding=0)
270
+ # Batchnorm layers
271
+ self.bn1 = self.which_bn(in_channels)
272
+ self.bn2 = self.which_bn(out_channels)
273
+ # upsample layers
274
+ self.upsample = upsample
275
+
276
+
277
+ def forward(self, x):
278
+ h = self.activation(self.bn1(x))
279
+ if self.upsample:
280
+ h = self.upsample(h)
281
+ x = self.upsample(x)
282
+ h = self.conv1(h)
283
+ h = self.activation(self.bn2(h))
284
+ h = self.conv2(h)
285
+ if self.learnable_sc:
286
+ x = self.conv_sc(x)
287
+ return h + x
288
+
289
+
290
+ class GBlock2(nn.Module):
291
+ def __init__(self, in_channels, out_channels,
292
+ which_conv=nn.Conv2d, activation=None,
293
+ upsample=None, skip_connection = True):
294
+ super(GBlock2, self).__init__()
295
+
296
+ self.in_channels, self.out_channels = in_channels, out_channels
297
+ self.which_conv = which_conv
298
+ self.activation = activation
299
+ self.upsample = upsample
300
+
301
+ # Conv layers
302
+ self.conv1 = self.which_conv(self.in_channels, self.out_channels)
303
+ self.conv2 = self.which_conv(self.out_channels, self.out_channels)
304
+ self.learnable_sc = in_channels != out_channels or upsample
305
+ if self.learnable_sc:
306
+ self.conv_sc = self.which_conv(in_channels, out_channels,
307
+ kernel_size=1, padding=0)
308
+
309
+ # upsample layers
310
+ self.upsample = upsample
311
+ self.skip_connection = skip_connection
312
+
313
+ def forward(self, x):
314
+ h = self.activation(x)
315
+ if self.upsample:
316
+ h = self.upsample(h)
317
+ x = self.upsample(x)
318
+ h = self.conv1(h)
319
+
320
+ h = self.activation(h)
321
+ h = self.conv2(h)
322
+
323
+ if self.learnable_sc:
324
+ x = self.conv_sc(x)
325
+
326
+
327
+ if self.skip_connection:
328
+ out = h + x
329
+ else:
330
+ out = h
331
+ return out
332
+
333
+ def content_encoder_arch(ch =64,out_channel_multiplier = 1, input_nc = 3):
334
+ arch = {}
335
+ n=2
336
+ arch[80] = {'in_channels': [input_nc] + [ch*item for item in [1,2]],
337
+ 'out_channels' : [item * ch for item in [1,2,4]],
338
+ 'resolution': [40,20,10]}
339
+ arch[96] = {'in_channels': [input_nc] + [ch*item for item in [1,2]],
340
+ 'out_channels' : [item * ch for item in [1,2,4]],
341
+ 'resolution': [48,24,12]}
342
+
343
+ arch[128] = {'in_channels': [input_nc] + [ch*item for item in [1,2,4,8]],
344
+ 'out_channels' : [item * ch for item in [1,2,4,8,16]],
345
+ 'resolution': [64,32,16,8,4]}
346
+
347
+ arch[256] = {'in_channels':[input_nc]+[ch*item for item in [1,2,4,8,8]],
348
+ 'out_channels':[item*ch for item in [1,2,4,8,8,16]],
349
+ 'resolution': [128,64,32,16,8,4]}
350
+ return arch
351
+
352
+ class ContentEncoder(ModelMixin, ConfigMixin):
353
+
354
+ @register_to_config
355
+ def __init__(self, G_ch=64, G_wide=True, resolution=128,
356
+ G_kernel_size=3, G_attn='64_32_16_8', n_classes=1000,
357
+ num_G_SVs=1, num_G_SV_itrs=1, G_activation=nn.ReLU(inplace=False),
358
+ SN_eps=1e-12, output_dim=1, G_fp16=False,
359
+ G_init='N02', G_param='SN', nf_mlp = 512, nEmbedding = 256, input_nc = 3,output_nc = 3):
360
+ super(ContentEncoder, self).__init__()
361
+
362
+ self.ch = G_ch
363
+ self.G_wide = G_wide
364
+ self.resolution = resolution
365
+ self.kernel_size = G_kernel_size
366
+ self.attention = G_attn
367
+ self.n_classes = n_classes
368
+ self.activation = G_activation
369
+ self.init = G_init
370
+ self.G_param = G_param
371
+ self.SN_eps = SN_eps
372
+ self.fp16 = G_fp16
373
+
374
+ if self.resolution == 96:
375
+ self.save_featrues = [0,1,2,3,4]
376
+ elif self.resolution == 80:
377
+ self.save_featrues = [0,1,2,3,4]
378
+ elif self.resolution == 128:
379
+ self.save_featrues = [0,1,2,3,4]
380
+ elif self.resolution == 256:
381
+ self.save_featrues = [0,1,2,3,4,5]
382
+
383
+ self.out_channel_nultipiler = 1
384
+ self.arch = content_encoder_arch(self.ch, self.out_channel_nultipiler,input_nc)[resolution]
385
+
386
+ if self.G_param == 'SN':
387
+ self.which_conv = functools.partial(SNConv2d,
388
+ kernel_size=3, padding=1,
389
+ num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
390
+ eps=self.SN_eps)
391
+ self.which_linear = functools.partial(SNLinear,
392
+ num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
393
+ eps=self.SN_eps)
394
+ self.blocks = []
395
+ for index in range(len(self.arch['out_channels'])):
396
+
397
+ self.blocks += [[DBlock(in_channels=self.arch['in_channels'][index],
398
+ out_channels=self.arch['out_channels'][index],
399
+ which_conv=self.which_conv,
400
+ wide=self.G_wide,
401
+ activation=self.activation,
402
+ preactivation=(index > 0),
403
+ downsample=nn.AvgPool2d(2))]]
404
+
405
+ self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
406
+ self.init_weights()
407
+
408
+
409
+ def init_weights(self):
410
+ self.param_count = 0
411
+ for module in self.modules():
412
+ if (isinstance(module, nn.Conv2d)
413
+ or isinstance(module, nn.Linear)
414
+ or isinstance(module, nn.Embedding)):
415
+ if self.init == 'ortho':
416
+ init.orthogonal_(module.weight)
417
+ elif self.init == 'N02':
418
+ init.normal_(module.weight, 0, 0.02)
419
+ elif self.init in ['glorot', 'xavier']:
420
+ init.xavier_uniform_(module.weight)
421
+ else:
422
+ print('Init style not recognized...')
423
+ self.param_count += sum([p.data.nelement() for p in module.parameters()])
424
+ print("Param count for the ContentEncoder's initialized parameters: %d" % self.param_count)
425
+
426
+ def forward(self,x):
427
+ h = x
428
+ residual_features = []
429
+ residual_features.append(h)
430
+ for index, blocklist in enumerate(self.blocks):
431
+ for block in blocklist:
432
+ h = block(h)
433
+ if index in self.save_featrues[:-1]:
434
+ residual_features.append(h)
435
+ return h, residual_features
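With the default resolution=128 arch above, each DBlock halves the spatial size, and the intermediate features are kept for the U-Net skip connections; roughly:

    import torch

    enc = ContentEncoder(G_ch=64, resolution=128, input_nc=3)
    x = torch.randn(1, 3, 128, 128)
    feat, residuals = enc(x)
    # feat: (1, 1024, 4, 4) after five stride-2 stages (128 -> 4)
    # residuals: the raw input plus the first four intermediate feature maps
    print(feat.shape, [r.shape for r in residuals])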
src/modules/embeddings.py ADDED
@@ -0,0 +1,84 @@
1
+ import math
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ def get_timestep_embedding(
8
+ timesteps: torch.Tensor,
9
+ embedding_dim: int,
10
+ flip_sin_to_cos: bool = False,
11
+ downscale_freq_shift: float = 1,
12
+ scale: float = 1,
13
+ max_period: int = 10000,
14
+ ):
15
+ """
16
+ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
17
+
18
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
19
+ These may be fractional.
20
+ :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings.
21
+ :return: an [N x dim] Tensor of positional embeddings.
22
+ """
23
+ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
24
+
25
+ half_dim = embedding_dim // 2
26
+ exponent = -math.log(max_period) * torch.arange(
27
+ start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
28
+ )
29
+ exponent = exponent / (half_dim - downscale_freq_shift)
30
+
31
+ emb = torch.exp(exponent)
32
+ emb = timesteps[:, None].float() * emb[None, :]
33
+
34
+ # scale embeddings
35
+ emb = scale * emb
36
+
37
+ # concat sine and cosine embeddings
38
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
39
+
40
+ # flip sine and cosine embeddings
41
+ if flip_sin_to_cos:
42
+ emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
43
+
44
+ # zero pad
45
+ if embedding_dim % 2 == 1:
46
+ emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
47
+ return emb
48
+
49
+
50
+ class TimestepEmbedding(nn.Module):
51
+ def __init__(self, channel: int, time_embed_dim: int, act_fn: str = "silu"):
52
+ super().__init__()
53
+
54
+ self.linear_1 = nn.Linear(channel, time_embed_dim)
55
+ self.act = None
56
+ if act_fn == "silu":
57
+ self.act = nn.SiLU()
58
+ self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim)
59
+
60
+ def forward(self, sample):
61
+ sample = self.linear_1(sample)
62
+
63
+ if self.act is not None:
64
+ sample = self.act(sample)
65
+
66
+ sample = self.linear_2(sample)
67
+ return sample
68
+
69
+
70
+ class Timesteps(nn.Module):
71
+ def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
72
+ super().__init__()
73
+ self.num_channels = num_channels
74
+ self.flip_sin_to_cos = flip_sin_to_cos
75
+ self.downscale_freq_shift = downscale_freq_shift
76
+
77
+ def forward(self, timesteps):
78
+ t_emb = get_timestep_embedding(
79
+ timesteps,
80
+ self.num_channels,
81
+ flip_sin_to_cos=self.flip_sin_to_cos,
82
+ downscale_freq_shift=self.downscale_freq_shift,
83
+ )
84
+ return t_emb
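Chaining the two modules the way diffusion U-Nets typically do (fixed sinusoidal projection followed by the small learned MLP); dimensions are illustrative:

    import torch

    time_proj = Timesteps(num_channels=128, flip_sin_to_cos=True, downscale_freq_shift=0)
    time_embedding = TimestepEmbedding(channel=128, time_embed_dim=512)

    t = torch.tensor([0, 250, 999])    # one timestep per batch element
    t_emb = time_embedding(time_proj(t))
    assert t_emb.shape == (3, 512)     # (3, 128) sinusoidal features -> (3, 512) embedding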
src/modules/resnet.py ADDED
@@ -0,0 +1,353 @@
1
+ from functools import partial
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+
7
+
8
+ def upfirdn2d_native(tensor, kernel, up=1, down=1, pad=(0, 0)):
9
+ up_x = up_y = up
10
+ down_x = down_y = down
11
+ pad_x0 = pad_y0 = pad[0]
12
+ pad_x1 = pad_y1 = pad[1]
13
+
14
+ _, channel, in_h, in_w = tensor.shape
15
+ tensor = tensor.reshape(-1, in_h, in_w, 1)
16
+
17
+ _, in_h, in_w, minor = tensor.shape
18
+ kernel_h, kernel_w = kernel.shape
19
+
20
+ out = tensor.view(-1, in_h, 1, in_w, 1, minor)
21
+
22
+ # Temporary workaround for mps specific issue: https://github.com/pytorch/pytorch/issues/84535
23
+ if tensor.device.type == "mps":
24
+ out = out.to("cpu")
25
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
26
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
27
+
28
+ out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
29
+ out = out.to(tensor.device) # Move back to mps if necessary
30
+ out = out[
31
+ :,
32
+ max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
33
+ max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
34
+ :,
35
+ ]
36
+
37
+ out = out.permute(0, 3, 1, 2)
38
+ out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
39
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
40
+ out = F.conv2d(out, w)
41
+ out = out.reshape(
42
+ -1,
43
+ minor,
44
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
45
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
46
+ )
47
+ out = out.permute(0, 2, 3, 1)
48
+ out = out[:, ::down_y, ::down_x, :]
49
+
50
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
51
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
52
+
53
+ return out.view(-1, channel, out_h, out_w)
54
+
55
+
56
+ def upsample_2d(hidden_states, kernel=None, factor=2, gain=1):
57
+ r"""Upsample2D a batch of 2D images with the given filter.
58
+ Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given
59
+ filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified
60
+ `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is
61
+ a: multiple of the upsampling factor.
62
+
63
+ Args:
64
+ hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
65
+ kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
66
+ (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.
67
+ factor: Integer upsampling factor (default: 2).
68
+ gain: Scaling factor for signal magnitude (default: 1.0).
69
+
70
+ Returns:
71
+ output: Tensor of the shape `[N, C, H * factor, W * factor]`
72
+ """
73
+ assert isinstance(factor, int) and factor >= 1
74
+ if kernel is None:
75
+ kernel = [1] * factor
76
+
77
+ kernel = torch.tensor(kernel, dtype=torch.float32)
78
+ if kernel.ndim == 1:
79
+ kernel = torch.outer(kernel, kernel)
80
+ kernel /= torch.sum(kernel)
81
+
82
+ kernel = kernel * (gain * (factor**2))
83
+ pad_value = kernel.shape[0] - factor
84
+ output = upfirdn2d_native(
85
+ hidden_states,
86
+ kernel.to(device=hidden_states.device),
87
+ up=factor,
88
+ pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
89
+ )
90
+ return output
91
+
92
+
93
+ def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):
94
+ r"""Downsample2D a batch of 2D images with the given filter.
95
+ Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
96
+ given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
97
+ specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
98
+ shape is a multiple of the downsampling factor.
99
+
100
+ Args:
101
+ hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
102
+ kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
103
+ (separable). The default is `[1] * factor`, which corresponds to average pooling.
104
+ factor: Integer downsampling factor (default: 2).
105
+ gain: Scaling factor for signal magnitude (default: 1.0).
106
+
107
+ Returns:
108
+ output: Tensor of the shape `[N, C, H // factor, W // factor]`
109
+ """
110
+
111
+ assert isinstance(factor, int) and factor >= 1
112
+ if kernel is None:
113
+ kernel = [1] * factor
114
+
115
+ kernel = torch.tensor(kernel, dtype=torch.float32)
116
+ if kernel.ndim == 1:
117
+ kernel = torch.outer(kernel, kernel)
118
+ kernel /= torch.sum(kernel)
119
+
120
+ kernel = kernel * gain
121
+ pad_value = kernel.shape[0] - factor
122
+ output = upfirdn2d_native(
123
+ hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)
124
+ )
125
+ return output
126
+
127
+
128
+ class Mish(torch.nn.Module):
129
+ def forward(self, hidden_states):
130
+ return hidden_states * torch.tanh(torch.nn.functional.softplus(hidden_states))
131
+
132
+
133
+ class Downsample2D(nn.Module):
134
+ """
135
+ A downsampling layer with an optional convolution.
136
+
137
+ Parameters:
138
+ channels: channels in the inputs and outputs.
139
+ use_conv: a bool determining if a convolution is applied.
140
+ dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions.
141
+ """
142
+
143
+ def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
144
+ super().__init__()
145
+ self.channels = channels
146
+ self.out_channels = out_channels or channels
147
+ self.use_conv = use_conv
148
+ self.padding = padding
149
+ stride = 2
150
+ self.name = name
151
+
152
+ if use_conv:
153
+ conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
154
+ else:
155
+ assert self.channels == self.out_channels
156
+ conv = nn.AvgPool2d(kernel_size=stride, stride=stride)
157
+
158
+ # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
159
+ if name == "conv":
160
+ self.Conv2d_0 = conv
161
+ self.conv = conv
162
+ elif name == "Conv2d_0":
163
+ self.conv = conv
164
+ else:
165
+ self.conv = conv
166
+
167
+ def forward(self, hidden_states):
168
+ assert hidden_states.shape[1] == self.channels
169
+ if self.use_conv and self.padding == 0:
170
+ pad = (0, 1, 0, 1)
171
+ hidden_states = F.pad(hidden_states, pad, mode="constant", value=0)
172
+
173
+ assert hidden_states.shape[1] == self.channels
174
+ hidden_states = self.conv(hidden_states)
175
+
176
+ return hidden_states
177
+
178
+
179
+ class ResnetBlock2D(nn.Module):
180
+ def __init__(
181
+ self,
182
+ *,
183
+ in_channels,
184
+ out_channels=None,
185
+ conv_shortcut=False,
186
+ dropout=0.0,
187
+ temb_channels=512,
188
+ groups=32,
189
+ groups_out=None,
190
+ pre_norm=True,
191
+ eps=1e-6,
192
+ non_linearity="swish",
193
+ time_embedding_norm="default",
194
+ kernel=None,
195
+ output_scale_factor=1.0,
196
+ use_in_shortcut=None,
197
+ up=False,
198
+ down=False,
199
+ ):
200
+ super().__init__()
201
+ self.pre_norm = pre_norm
202
+ self.pre_norm = True
203
+ self.in_channels = in_channels
204
+ out_channels = in_channels if out_channels is None else out_channels
205
+ self.out_channels = out_channels
206
+ self.use_conv_shortcut = conv_shortcut
207
+ self.time_embedding_norm = time_embedding_norm
208
+ self.up = up
209
+ self.down = down
210
+ self.output_scale_factor = output_scale_factor
211
+
212
+ if groups_out is None:
213
+ groups_out = groups
214
+
215
+ self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
216
+
217
+ self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
218
+
219
+ if temb_channels is not None:
220
+ self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
221
+ else:
222
+ self.time_emb_proj = None
223
+
224
+ self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
225
+ self.dropout = torch.nn.Dropout(dropout)
226
+ self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
227
+
228
+ if non_linearity == "swish":
229
+ self.nonlinearity = lambda x: F.silu(x)
230
+ elif non_linearity == "mish":
231
+ self.nonlinearity = Mish()
232
+ elif non_linearity == "silu":
233
+ self.nonlinearity = nn.SiLU()
234
+
235
+ self.upsample = self.downsample = None
236
+ if self.up:
237
+ if kernel == "fir":
238
+ fir_kernel = (1, 3, 3, 1)
239
+ self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
240
+ elif kernel == "sde_vp":
241
+ self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
242
+ else:
243
+ self.upsample = Upsample2D(in_channels, use_conv=False)
244
+ elif self.down:
245
+ if kernel == "fir":
246
+ fir_kernel = (1, 3, 3, 1)
247
+ self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
248
+ elif kernel == "sde_vp":
249
+ self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
250
+ else:
251
+ self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")
252
+
253
+ self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
254
+
255
+ self.conv_shortcut = None
256
+ if self.use_in_shortcut:
257
+ self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
258
+
259
+ def forward(self, input_tensor, temb):
260
+ hidden_states = input_tensor
261
+
262
+ hidden_states = self.norm1(hidden_states) # hidden_states: torch.Size([1, 128, 64, 64])
263
+ hidden_states = self.nonlinearity(hidden_states)
264
+
265
+ if self.upsample is not None: # for cross-attention blocks, both upsample and downsample are None
266
+ input_tensor = self.upsample(input_tensor)
267
+ hidden_states = self.upsample(hidden_states)
268
+ elif self.downsample is not None:
269
+ input_tensor = self.downsample(input_tensor)
270
+ hidden_states = self.downsample(hidden_states)
271
+
272
+ hidden_states = self.conv1(hidden_states)
273
+
274
+ if temb is not None:
275
+ temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
276
+ hidden_states = hidden_states + temb # just add together
277
+
278
+ hidden_states = self.norm2(hidden_states)
279
+ hidden_states = self.nonlinearity(hidden_states)
280
+
281
+ hidden_states = self.dropout(hidden_states)
282
+ hidden_states = self.conv2(hidden_states)
283
+
284
+ if self.conv_shortcut is not None:
285
+ input_tensor = self.conv_shortcut(input_tensor)
286
+
287
+ output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
288
+
289
+ return output_tensor
290
+
291
+
292
+ class Upsample2D(nn.Module):
293
+ """
294
+ An upsampling layer with an optional convolution.
295
+
296
+ Parameters:
297
+ channels: channels in the inputs and outputs.
298
+ use_conv: a bool determining if a convolution is applied.
299
+ dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions.
300
+ """
301
+
302
+ def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
303
+ super().__init__()
304
+ self.channels = channels
305
+ self.out_channels = out_channels or channels
306
+ self.use_conv = use_conv
307
+ self.use_conv_transpose = use_conv_transpose
308
+ self.name = name
309
+
310
+ conv = None
311
+ if use_conv_transpose:
312
+ conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)
313
+ elif use_conv:
314
+ conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1)
315
+
316
+ # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
317
+ if name == "conv":
318
+ self.conv = conv
319
+ else:
320
+ self.Conv2d_0 = conv
321
+
322
+ def forward(self, hidden_states, output_size=None):
323
+ assert hidden_states.shape[1] == self.channels
324
+
325
+ if self.use_conv_transpose:
326
+ return self.conv(hidden_states)
327
+
328
+ # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
329
+ # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
330
+ # https://github.com/pytorch/pytorch/issues/86679
331
+ dtype = hidden_states.dtype
332
+ if dtype == torch.bfloat16:
333
+ hidden_states = hidden_states.to(torch.float32)
334
+
335
+ # if `output_size` is passed we force the interpolation output
336
+ # size and do not make use of `scale_factor=2`
337
+ if output_size is None:
338
+ hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
339
+ else:
340
+ hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
341
+
342
+ # If the input is bfloat16, we cast back to bfloat16
343
+ if dtype == torch.bfloat16:
344
+ hidden_states = hidden_states.to(dtype)
345
+
346
+ # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
347
+ if self.use_conv:
348
+ if self.name == "conv":
349
+ hidden_states = self.conv(hidden_states)
350
+ else:
351
+ hidden_states = self.Conv2d_0(hidden_states)
352
+
353
+ return hidden_states
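ResnetBlock2D is the building block the U-Net blocks stack; a minimal forward pass with a time embedding (illustrative sizes):

    import torch

    block = ResnetBlock2D(in_channels=128, out_channels=256, temb_channels=512)
    x = torch.randn(1, 128, 32, 32)
    temb = torch.randn(1, 512)                # output of the timestep MLP
    out = block(x, temb)
    assert out.shape == (1, 256, 32, 32)      # the 1x1 conv_shortcut handles the channel change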
src/modules/style_encoder.py ADDED
@@ -0,0 +1,442 @@
1
+ import functools
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.nn import init
7
+
8
+ from diffusers import ModelMixin
9
+ from diffusers.configuration_utils import (ConfigMixin,
10
+ register_to_config)
11
+
12
+
13
+ def proj(x, y):
14
+ return torch.mm(y, x.t()) * y / torch.mm(y, y.t())
15
+
16
+
17
+ def gram_schmidt(x, ys):
18
+ for y in ys:
19
+ x = x - proj(x, y)
20
+ return x
21
+
22
+
23
+ def power_iteration(W, u_, update=True, eps=1e-12):
24
+ us, vs, svs = [], [], []
25
+ for i, u in enumerate(u_):
26
+ with torch.no_grad():
27
+ v = torch.matmul(u, W)
28
+ v = F.normalize(gram_schmidt(v, vs), eps=eps)
29
+ vs += [v]
30
+ u = torch.matmul(v, W.t())
31
+ u = F.normalize(gram_schmidt(u, us), eps=eps)
32
+ us += [u]
33
+ if update:
34
+ u_[i][:] = u
35
+ svs += [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]
36
+ return svs, us, vs
37
+
38
+
39
+ class LinearBlock(nn.Module):
40
+ def __init__(
41
+ self,
42
+ in_dim,
43
+ out_dim,
44
+ norm='none',
45
+ act='relu',
46
+ use_sn=False
47
+ ):
48
+ super(LinearBlock, self).__init__()
49
+ use_bias = True
50
+ self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
51
+ if use_sn:
52
+ self.fc = nn.utils.spectral_norm(self.fc)
53
+
54
+ # initialize normalization
55
+ norm_dim = out_dim
56
+ if norm == 'bn':
57
+ self.norm = nn.BatchNorm1d(norm_dim)
58
+ elif norm == 'in':
59
+ self.norm = nn.InstanceNorm1d(norm_dim)
60
+ elif norm == 'none':
61
+ self.norm = None
62
+ else:
63
+ assert 0, "Unsupported normalization: {}".format(norm)
64
+
65
+ # initialize activation
66
+ if act == 'relu':
67
+ self.activation = nn.ReLU(inplace=True)
68
+ elif act == 'lrelu':
69
+ self.activation = nn.LeakyReLU(0.2, inplace=True)
70
+ elif act == 'tanh':
71
+ self.activation = nn.Tanh()
72
+ elif act == 'none':
73
+ self.activation = None
74
+ else:
75
+ assert 0, "Unsupported activation: {}".format(act)
76
+
77
+ def forward(self, x):
78
+ out = self.fc(x)
79
+ if self.norm:
80
+ out = self.norm(out)
81
+ if self.activation:
82
+ out = self.activation(out)
83
+ return out
84
+
85
+
86
+ class MLP(nn.Module):
87
+ def __init__(
88
+ self,
89
+ nf_in,
90
+ nf_out,
91
+ nf_mlp,
92
+ num_blocks,
93
+ norm,
94
+ act,
95
+ use_sn =False
96
+ ):
97
+ super(MLP,self).__init__()
98
+ self.model = nn.ModuleList()
99
+ nf = nf_mlp
100
+ self.model.append(LinearBlock(nf_in, nf, norm = norm, act = act, use_sn = use_sn))
101
+ for _ in range((num_blocks - 2)):
102
+ self.model.append(LinearBlock(nf, nf, norm=norm, act=act, use_sn=use_sn))
103
+ self.model.append(LinearBlock(nf, nf_out, norm='none', act ='none', use_sn = use_sn))
104
+ self.model = nn.Sequential(*self.model)
105
+
106
+ def forward(self, x):
107
+ return self.model(x.view(x.size(0), -1))
108
+
109
+
110
+ class SN(object):
111
+ def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
112
+ self.num_itrs = num_itrs
113
+ self.num_svs = num_svs
114
+ self.transpose = transpose
115
+ self.eps = eps
116
+ for i in range(self.num_svs):
117
+ self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
118
+ self.register_buffer('sv%d' % i, torch.ones(1))
119
+
120
+ @property
121
+ def u(self):
122
+ return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
123
+
124
+ @property
125
+ def sv(self):
126
+ return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
127
+
128
+ def W_(self):
129
+ W_mat = self.weight.view(self.weight.size(0), -1)
130
+ if self.transpose:
131
+ W_mat = W_mat.t()
132
+ for _ in range(self.num_itrs):
133
+ svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
134
+ if self.training:
135
+ with torch.no_grad():
136
+ for i, sv in enumerate(svs):
137
+ self.sv[i][:] = sv
138
+ return self.weight / svs[0]
139
+
140
+
141
+ class SNConv2d(nn.Conv2d, SN):
142
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
143
+ padding=0, dilation=1, groups=1, bias=True,
144
+ num_svs=1, num_itrs=1, eps=1e-12):
145
+ nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
146
+ padding, dilation, groups, bias)
147
+ SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
148
+
149
+ def forward(self, x):
150
+ return F.conv2d(x, self.W_(), self.bias, self.stride,
151
+ self.padding, self.dilation, self.groups)
152
+
153
+ def forward_wo_sn(self, x):
154
+ return F.conv2d(x, self.weight, self.bias, self.stride,
155
+ self.padding, self.dilation, self.groups)
156
+
157
+
158
+ class SNLinear(nn.Linear, SN):
159
+ def __init__(self, in_features, out_features, bias=True,
160
+ num_svs=1, num_itrs=1, eps=1e-12):
161
+ nn.Linear.__init__(self, in_features, out_features, bias)
162
+ SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)
163
+
164
+ def forward(self, x):
165
+ return F.linear(x, self.W_(), self.bias)
166
+
167
+
168
+ class DBlock(nn.Module):
169
+ def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,
170
+ preactivation=False, activation=None, downsample=None,):
171
+ super(DBlock, self).__init__()
172
+
173
+ self.in_channels, self.out_channels = in_channels, out_channels
174
+
175
+ self.hidden_channels = self.out_channels if wide else self.in_channels
176
+ self.which_conv = which_conv
177
+ self.preactivation = preactivation
178
+ self.activation = activation
179
+ self.downsample = downsample
180
+
181
+ # Conv layers
182
+ self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
183
+ self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
184
+ self.learnable_sc = True if (in_channels != out_channels) or downsample else False
185
+ if self.learnable_sc:
186
+ self.conv_sc = self.which_conv(in_channels, out_channels,
187
+ kernel_size=1, padding=0)
188
+ def shortcut(self, x):
189
+ if self.preactivation:
190
+ if self.learnable_sc:
191
+ x = self.conv_sc(x)
192
+ if self.downsample:
193
+ x = self.downsample(x)
194
+ else:
195
+ if self.downsample:
196
+ x = self.downsample(x)
197
+ if self.learnable_sc:
198
+ x = self.conv_sc(x)
199
+ return x
200
+
201
+ def forward(self, x):
202
+
203
+ if self.preactivation:
204
+ h = F.relu(x)
205
+ else:
206
+ h = x
207
+ h = self.conv1(h)
208
+ h = self.conv2(self.activation(h))
209
+ if self.downsample:
210
+ h = self.downsample(h)
211
+
212
+ return h + self.shortcut(x)
213
+
214
+
215
+ class GBlock(nn.Module):
216
+ def __init__(self, in_channels, out_channels,
217
+ which_conv=nn.Conv2d,which_bn= nn.BatchNorm2d, activation=None,
218
+ upsample=None):
219
+ super(GBlock, self).__init__()
220
+
221
+ self.in_channels, self.out_channels = in_channels, out_channels
222
+ self.which_conv,self.which_bn =which_conv, which_bn
223
+ self.activation = activation
224
+ self.upsample = upsample
225
+ # Conv layers
226
+ self.conv1 = self.which_conv(self.in_channels, self.out_channels)
227
+ self.conv2 = self.which_conv(self.out_channels, self.out_channels)
228
+ self.learnable_sc = in_channels != out_channels or upsample
229
+ if self.learnable_sc:
230
+ self.conv_sc = self.which_conv(in_channels, out_channels,
231
+ kernel_size=1, padding=0)
232
+ # Batchnorm layers
233
+ self.bn1 = self.which_bn(in_channels)
234
+ self.bn2 = self.which_bn(out_channels)
235
+ # upsample layers
236
+ self.upsample = upsample
237
+
238
+
239
+ def forward(self, x):
240
+ h = self.activation(self.bn1(x))
241
+ if self.upsample:
242
+ h = self.upsample(h)
243
+ x = self.upsample(x)
244
+ h = self.conv1(h)
245
+ h = self.activation(self.bn2(h))
246
+ h = self.conv2(h)
247
+ if self.learnable_sc:
248
+ x = self.conv_sc(x)
249
+ return h + x
250
+
251
+
252
+ class GBlock2(nn.Module):
253
+ def __init__(self, in_channels, out_channels,
254
+ which_conv=nn.Conv2d, activation=None,
255
+ upsample=None, skip_connection = True):
256
+ super(GBlock2, self).__init__()
257
+
258
+ self.in_channels, self.out_channels = in_channels, out_channels
259
+ self.which_conv = which_conv
260
+ self.activation = activation
261
+ self.upsample = upsample
262
+
263
+ # Conv layers
264
+ self.conv1 = self.which_conv(self.in_channels, self.out_channels)
265
+ self.conv2 = self.which_conv(self.out_channels, self.out_channels)
266
+ self.learnable_sc = in_channels != out_channels or upsample
267
+ if self.learnable_sc:
268
+ self.conv_sc = self.which_conv(in_channels, out_channels,
269
+ kernel_size=1, padding=0)
270
+ # upsample layers
271
+ self.upsample = upsample
272
+ self.skip_connection = skip_connection
273
+
274
+ def forward(self, x):
275
+ h = self.activation(x)
276
+ if self.upsample:
277
+ h = self.upsample(h)
278
+ x = self.upsample(x)
279
+ h = self.conv1(h)
280
+
281
+ h = self.activation(h)
282
+ h = self.conv2(h)
283
+
284
+ if self.learnable_sc:
285
+ x = self.conv_sc(x)
286
+ if self.skip_connection:
287
+ out = h + x
288
+ else:
289
+ out = h
290
+ return out
291
+
292
+
293
+ def style_encoder_textedit_addskip_arch(ch=64, out_channel_multiplier=1, input_nc=3):
294
+ arch = {}
295
+ n=2
296
+ arch[96] = {'in_channels': [input_nc] + [ch*item for item in [1,2,4,8]],
297
+ 'out_channels' : [item * ch for item in [1,2,4,8,16]],
298
+ 'resolution': [48,24,12,6,3]}
299
+
300
+ arch[128] = {'in_channels': [input_nc] + [ch*item for item in [1,2,4,8]],
301
+ 'out_channels' : [item * ch for item in [1,2,4,8,16]],
302
+ 'resolution': [64,32,16,8,4]}
303
+
304
+ arch[256] = {'in_channels':[input_nc]+[ch*item for item in [1,2,4,8,8]],
305
+ 'out_channels':[item*ch for item in [1,2,4,8,8,16]],
306
+ 'resolution': [128,64,32,16,8,4]}
307
+ return arch
308
+
309
+
310
+ class StyleEncoder(ModelMixin, ConfigMixin):
311
+ """
312
+ This class encodes the style image into an image embedding.
313
+ Downsample scale is 32.
314
+ For example:
315
+ Input: Shape[Batch, 3, 128, 128]
316
+ Output: Shape[Batch, 1024, 4, 4] (with the default G_ch=64)
317
+ """
318
+ @register_to_config
319
+ def __init__(
320
+ self,
321
+ G_ch=64,
322
+ G_wide=True,
323
+ resolution=128,
324
+ G_kernel_size=3,
325
+ G_attn='64_32_16_8',
326
+ n_classes=1000,
327
+ num_G_SVs=1,
328
+ num_G_SV_itrs=1,
329
+ G_activation=nn.ReLU(inplace=False),
330
+ SN_eps=1e-12,
331
+ output_dim=1,
332
+ G_fp16=False,
333
+ G_init='N02',
334
+ G_param='SN',
335
+ nf_mlp = 512,
336
+ nEmbedding = 256,
337
+ input_nc = 3,
338
+ output_nc = 3
339
+ ):
340
+ super(StyleEncoder, self).__init__()
341
+
342
+ self.ch = G_ch
343
+ self.G_wide = G_wide
344
+ self.resolution = resolution
345
+ self.kernel_size = G_kernel_size
346
+ self.attention = G_attn
347
+ self.n_classes = n_classes
348
+ self.activation = G_activation
349
+ self.init = G_init
350
+ self.G_param = G_param
351
+ self.SN_eps = SN_eps
352
+ self.fp16 = G_fp16
353
+
354
+ if self.resolution == 96:
355
+ self.save_features = [0, 1, 2, 3, 4]
356
+ if self.resolution == 128:
357
+ self.save_features = [0, 1, 2, 3, 4]
358
+ elif self.resolution == 256:
359
+ self.save_features = [0, 1, 2, 3, 4, 5]
360
+
361
+ self.out_channel_multiplier = 1
362
+ self.arch = style_encoder_textedit_addskip_arch(
363
+ self.ch,
364
+ self.out_channel_multiplier,
365
+ input_nc
366
+ )[resolution]
367
+
368
+ if self.G_param == 'SN':
369
+ self.which_conv = functools.partial(
370
+ SNConv2d,
371
+ kernel_size=3, padding=1,
372
+ num_svs=num_G_SVs,
373
+ num_itrs=num_G_SV_itrs,
374
+ eps=self.SN_eps
375
+ )
376
+ self.which_linear = functools.partial(
377
+ SNLinear,
378
+ num_svs=num_G_SVs,
379
+ num_itrs=num_G_SV_itrs,
380
+ eps=self.SN_eps
381
+ )
382
+ self.blocks = []
383
+ for index in range(len(self.arch['out_channels'])):
384
+
385
+ self.blocks += [[DBlock(
386
+ in_channels=self.arch['in_channels'][index],
387
+ out_channels=self.arch['out_channels'][index],
388
+ which_conv=self.which_conv,
389
+ wide=self.G_wide,
390
+ activation=self.activation,
391
+ preactivation=(index > 0),
392
+ downsample=nn.AvgPool2d(2)
393
+ )]]
394
+
395
+ self.blocks = nn.ModuleList([
396
+ nn.ModuleList(block) for block in self.blocks
397
+ ])
398
+ last_layer = nn.Sequential(
399
+ nn.InstanceNorm2d(self.arch['out_channels'][-1]),
400
+ self.activation,
401
+ nn.Conv2d(
402
+ self.arch['out_channels'][-1],
403
+ self.arch['out_channels'][-1],
404
+ kernel_size=1,
405
+ stride=1
406
+ )
407
+ )
408
+ self.blocks.append(last_layer)
409
+ self.init_weights()
410
+
411
+ def init_weights(self):
412
+ self.param_count = 0
413
+ for module in self.modules():
414
+ if (isinstance(module, nn.Conv2d)
415
+ or isinstance(module, nn.Linear)
416
+ or isinstance(module, nn.Embedding)):
417
+ if self.init == 'ortho':
418
+ init.orthogonal_(module.weight)
419
+ elif self.init == 'N02':
420
+ init.normal_(module.weight, 0, 0.02)
421
+ elif self.init in ['glorot', 'xavier']:
422
+ init.xavier_uniform_(module.weight)
423
+ else:
424
+ print('Init style not recognized...')
425
+ self.param_count += sum([p.data.nelement() for p in module.parameters()])
426
+ print("Param count for the StyleEncoder's initialized parameters: %d" % self.param_count)
427
+
428
+ def forward(self,x):
429
+ h = x
430
+ residual_features = []
431
+ residual_features.append(h)
432
+ for index, blocklist in enumerate(self.blocks):
433
+ for block in blocklist:
434
+ h = block(h)
435
+ if index in self.save_features[:-1]:
436
+ residual_features.append(h)
437
+ h = self.blocks[-1](h)
438
+ style_emd = h
439
+ h = F.adaptive_avg_pool2d(h,(1,1))
440
+ h = h.view(h.size(0),-1)
441
+
442
+ return style_emd,h,residual_features
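
A minimal usage sketch for the StyleEncoder defined above. The import path src.modules.style_encoder and the default config (G_ch=64, resolution=128) are assumptions taken from this repo's layout; the shapes shown follow from those defaults, not from the released checkpoint:

    # Hedged sketch: instantiate StyleEncoder with its defaults and inspect output shapes.
    import torch
    from src.modules.style_encoder import StyleEncoder  # import path assumed from this repo

    encoder = StyleEncoder()                     # defaults: G_ch=64, resolution=128
    style_img = torch.randn(2, 3, 128, 128)      # dummy batch of style images
    style_emd, h, residual_features = encoder(style_img)
    print(style_emd.shape)                       # expected: torch.Size([2, 1024, 4, 4])
    print(h.shape)                               # expected: torch.Size([2, 1024])
    print(len(residual_features))                # the input plus the saved intermediate maps
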
src/modules/unet.py ADDED
@@ -0,0 +1,299 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional, Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.utils.checkpoint
7
+
8
+ from diffusers import ModelMixin
9
+ from diffusers.configuration_utils import (ConfigMixin,
10
+ register_to_config)
11
+ from diffusers.utils import BaseOutput, logging
12
+
13
+ from .embeddings import TimestepEmbedding, Timesteps
14
+ from .unet_blocks import (DownBlock2D,
15
+ UNetMidMCABlock2D,
16
+ UpBlock2D,
17
+ get_down_block,
18
+ get_up_block)
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ @dataclass
25
+ class UNetOutput(BaseOutput):
26
+ sample: torch.FloatTensor
27
+
28
+
29
+ class UNet(ModelMixin, ConfigMixin):
30
+ _supports_gradient_checkpointing = True
31
+
32
+ @register_to_config
33
+ def __init__(
34
+ self,
35
+ sample_size: Optional[int] = None,
36
+ in_channels: int = 4,
37
+ out_channels: int = 4,
38
+ flip_sin_to_cos: bool = True,
39
+ freq_shift: int = 0,
40
+ down_block_types: Tuple[str] = None,
41
+ up_block_types: Tuple[str] = None,
42
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
43
+ layers_per_block: int = 1,
44
+ downsample_padding: int = 1,
45
+ mid_block_scale_factor: float = 1,
46
+ act_fn: str = "silu",
47
+ norm_num_groups: int = 32,
48
+ norm_eps: float = 1e-5,
49
+ cross_attention_dim: int = 1280,
50
+ attention_head_dim: int = 8,
51
+ channel_attn: bool = False,
52
+ content_encoder_downsample_size: int = 4,
53
+ content_start_channel: int = 16,
54
+ reduction: int = 32,
55
+ ):
56
+ super().__init__()
57
+
58
+ self.content_encoder_downsample_size = content_encoder_downsample_size
59
+
60
+ self.sample_size = sample_size
61
+ time_embed_dim = block_out_channels[0] * 4
62
+
63
+ # input
64
+ self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
65
+
66
+ # time
67
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
68
+ timestep_input_dim = block_out_channels[0]
69
+
70
+ self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
71
+
72
+ self.down_blocks = nn.ModuleList([])
73
+ self.mid_block = None
74
+ self.up_blocks = nn.ModuleList([])
75
+
76
+ # down
77
+ output_channel = block_out_channels[0]
78
+ for i, down_block_type in enumerate(down_block_types):
79
+ input_channel = output_channel
80
+ output_channel = block_out_channels[i]
81
+ is_final_block = i == len(block_out_channels) - 1
82
+
83
+ if i != 0:
84
+ content_channel = content_start_channel * (2 ** (i-1))
85
+ else:
86
+ content_channel = 0
87
+
88
+ print("Load the down block ", down_block_type)
89
+ down_block = get_down_block(
90
+ down_block_type,
91
+ num_layers=layers_per_block,
92
+ in_channels=input_channel,
93
+ out_channels=output_channel,
94
+ temb_channels=time_embed_dim,
95
+ add_downsample=not is_final_block,
96
+ resnet_eps=norm_eps,
97
+ resnet_act_fn=act_fn,
98
+ resnet_groups=norm_num_groups,
99
+ cross_attention_dim=cross_attention_dim,
100
+ attn_num_head_channels=attention_head_dim,
101
+ downsample_padding=downsample_padding,
102
+ content_channel=content_channel,
103
+ reduction=reduction,
104
+ channel_attn=channel_attn,
105
+ )
106
+ self.down_blocks.append(down_block)
107
+
108
+ # mid
109
+ self.mid_block = UNetMidMCABlock2D(
110
+ in_channels=block_out_channels[-1],
111
+ temb_channels=time_embed_dim,
112
+ channel_attn=channel_attn,
113
+ resnet_eps=norm_eps,
114
+ resnet_act_fn=act_fn,
115
+ output_scale_factor=mid_block_scale_factor,
116
+ resnet_time_scale_shift="default",
117
+ cross_attention_dim=cross_attention_dim,
118
+ attn_num_head_channels=attention_head_dim,
119
+ resnet_groups=norm_num_groups,
120
+ content_channel=content_start_channel*(2**(content_encoder_downsample_size - 1)),
121
+ reduction=reduction,
122
+ )
123
+
124
+ # count how many layers upsample the images
125
+ self.num_upsamplers = 0
126
+
127
+ # up
128
+ reversed_block_out_channels = list(reversed(block_out_channels))
129
+ output_channel = reversed_block_out_channels[0]
130
+ for i, up_block_type in enumerate(up_block_types):
131
+ is_final_block = i == len(block_out_channels) - 1
132
+
133
+ prev_output_channel = output_channel
134
+ output_channel = reversed_block_out_channels[i]
135
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
136
+
137
+ # add upsample block for all BUT final layer
138
+ if not is_final_block:
139
+ add_upsample = True
140
+ self.num_upsamplers += 1
141
+ else:
142
+ add_upsample = False
143
+
144
+ content_channel = content_start_channel * (2 ** (content_encoder_downsample_size - i - 1))
145
+
146
+ print("Load the up block ", up_block_type)
147
+ up_block = get_up_block(
148
+ up_block_type,
149
+ num_layers=layers_per_block + 1,  # one more layer than the down block
150
+ in_channels=input_channel,
151
+ out_channels=output_channel,
152
+ prev_output_channel=prev_output_channel,
153
+ temb_channels=time_embed_dim,
154
+ add_upsample=add_upsample,
155
+ resnet_eps=norm_eps,
156
+ resnet_act_fn=act_fn,
157
+ resnet_groups=norm_num_groups,
158
+ cross_attention_dim=cross_attention_dim,
159
+ attn_num_head_channels=attention_head_dim,
160
+ upblock_index=i,
161
+ )
162
+ self.up_blocks.append(up_block)
163
+ prev_output_channel = output_channel
164
+
165
+ # out
166
+ self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
167
+ self.conv_act = nn.SiLU()
168
+ self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
169
+
170
+ def set_attention_slice(self, slice_size):
171
+ if slice_size is not None and self.config.attention_head_dim % slice_size != 0:
172
+ raise ValueError(
173
+ f"Make sure slice_size {slice_size} is a divisor of "
174
+ f"the number of heads used in cross_attention {self.config.attention_head_dim}"
175
+ )
176
+ if slice_size is not None and slice_size > self.config.attention_head_dim:
177
+ raise ValueError(
178
+ f"Chunk_size {slice_size} has to be smaller or equal to "
179
+ f"the number of heads used in cross_attention {self.config.attention_head_dim}"
180
+ )
181
+
182
+ for block in self.down_blocks:
183
+ if hasattr(block, "attentions") and block.attentions is not None:
184
+ block.set_attention_slice(slice_size)
185
+
186
+ self.mid_block.set_attention_slice(slice_size)
187
+
188
+ for block in self.up_blocks:
189
+ if hasattr(block, "attentions") and block.attentions is not None:
190
+ block.set_attention_slice(slice_size)
191
+
192
+ def _set_gradient_checkpointing(self, module, value=False):
193
+ if isinstance(module, (DownBlock2D, UpBlock2D)):
194
+ module.gradient_checkpointing = value
195
+
196
+ def forward(
197
+ self,
198
+ sample: torch.FloatTensor,
199
+ timestep: Union[torch.Tensor, float, int],
200
+ encoder_hidden_states: torch.Tensor,
201
+ content_encoder_downsample_size: int = 4,
202
+ return_dict: bool = False,
203
+ ) -> Union[UNetOutput, Tuple]:
204
+ # By default, samples have to be at least a multiple of the overall upsampling factor.
206
+ # The overall upsampling factor is equal to 2 ** (number of upsampling layers).
206
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
207
+ # on the fly if necessary.
208
+ default_overall_up_factor = 2**self.num_upsamplers
209
+
210
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
211
+ forward_upsample_size = False
212
+ upsample_size = None
213
+
214
+ if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
215
+ logger.info("Forward upsample size to force interpolation output size.")
216
+ forward_upsample_size = True
217
+
218
+ # 1. time
219
+ timesteps = timestep # only one time
220
+ if not torch.is_tensor(timesteps):
221
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
222
+ timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
223
+ elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
224
+ timesteps = timesteps[None].to(sample.device)
225
+
226
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
227
+ timesteps = timesteps.expand(sample.shape[0])
228
+
229
+ t_emb = self.time_proj(timesteps)
230
+
231
+ # timesteps does not contain any weights and will always return f32 tensors
232
+ # but time_embedding might actually be running in fp16. so we need to cast here.
233
+ # there might be better ways to encapsulate this.
234
+ t_emb = t_emb.to(dtype=self.dtype)
235
+ emb = self.time_embedding(t_emb) # projection
236
+
237
+ # 2. pre-process
238
+ sample = self.conv_in(sample)
239
+
240
+ # 3. down
241
+ down_block_res_samples = (sample,)
242
+ for index, downsample_block in enumerate(self.down_blocks):
243
+ if (hasattr(downsample_block, "attentions") and downsample_block.attentions is not None) or hasattr(downsample_block, "content_attentions"):
244
+ sample, res_samples = downsample_block(
245
+ hidden_states=sample,
246
+ temb=emb,
247
+ encoder_hidden_states=encoder_hidden_states,
248
+ index=index,
249
+ )
250
+ else:
251
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
252
+
253
+ down_block_res_samples += res_samples
254
+
255
+ # 4. mid
256
+ if self.mid_block is not None:
257
+ sample = self.mid_block(
258
+ sample,
259
+ emb,
260
+ index=content_encoder_downsample_size,
261
+ encoder_hidden_states=encoder_hidden_states
262
+ )
263
+
264
+ # 5. up
265
+ offset_out_sum = 0
266
+ for i, upsample_block in enumerate(self.up_blocks):
267
+ is_final_block = i == len(self.up_blocks) - 1
268
+
269
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
270
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
271
+
272
+ # if we have not reached the final block and need to forward the
273
+ # upsample size, we do it here
274
+ if not is_final_block and forward_upsample_size:
275
+ upsample_size = down_block_res_samples[-1].shape[2:]
276
+
277
+ if (hasattr(upsample_block, "attentions") and upsample_block.attentions is not None) or hasattr(upsample_block, "content_attentions"):
278
+ sample, offset_out = upsample_block(
279
+ hidden_states=sample,
280
+ temb=emb,
281
+ res_hidden_states_tuple=res_samples,
282
+ style_structure_features=encoder_hidden_states[3],
283
+ encoder_hidden_states=encoder_hidden_states[2],
284
+ )
285
+ offset_out_sum += offset_out
286
+ else:
287
+ sample = upsample_block(
288
+ hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
289
+ )
290
+
291
+ # 6. post-process
292
+ sample = self.conv_norm_out(sample)
293
+ sample = self.conv_act(sample)
294
+ sample = self.conv_out(sample)
295
+
296
+ if not return_dict:
297
+ return (sample, offset_out_sum)
298
+
299
+ return UNetOutput(sample=sample)
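
The timestep handling at the start of UNet.forward above ("1. time") accepts a plain Python number, a 0-d tensor, or a 1-d tensor and always broadcasts it over the batch. The following standalone sketch (plain PyTorch, hypothetical shapes) mirrors just that logic:

    # Standalone sketch of the timestep broadcasting performed in UNet.forward ("1. time").
    import torch

    def broadcast_timestep(timestep, sample):
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # scalar int/float -> 1-element long tensor on the sample's device
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif len(timesteps.shape) == 0:
            # 0-d tensor -> 1-d tensor
            timesteps = timesteps[None].to(sample.device)
        # broadcast to the batch dimension (ONNX/Core ML friendly, as noted above)
        return timesteps.expand(sample.shape[0])

    sample = torch.randn(4, 3, 96, 96)                         # hypothetical noisy batch
    print(broadcast_timestep(981, sample))                     # tensor([981, 981, 981, 981])
    print(broadcast_timestep(torch.tensor(10), sample).shape)  # torch.Size([4])
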
src/modules/unet_blocks.py ADDED
@@ -0,0 +1,661 @@
1
+ import torch
2
+ from torch import nn
3
+ from torchvision.ops import DeformConv2d
4
+
5
+ from .attention import (SpatialTransformer,
6
+ OffsetRefStrucInter,
7
+ ChannelAttnBlock)
8
+ from .resnet import (Downsample2D,
9
+ ResnetBlock2D,
10
+ Upsample2D)
11
+
12
+
13
+ def get_down_block(
14
+ down_block_type,
15
+ num_layers,
16
+ in_channels,
17
+ out_channels,
18
+ temb_channels,
19
+ add_downsample,
20
+ resnet_eps,
21
+ resnet_act_fn,
22
+ attn_num_head_channels,
23
+ resnet_groups=None,
24
+ cross_attention_dim=None,
25
+ downsample_padding=None,
26
+ channel_attn=False,
27
+ content_channel=32,
28
+ reduction=32):
29
+
30
+ down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
31
+ if down_block_type == "DownBlock2D":
32
+ return DownBlock2D(
33
+ num_layers=num_layers,
34
+ in_channels=in_channels,
35
+ out_channels=out_channels,
36
+ temb_channels=temb_channels,
37
+ add_downsample=add_downsample,
38
+ resnet_eps=resnet_eps,
39
+ resnet_act_fn=resnet_act_fn,
40
+ resnet_groups=resnet_groups,
41
+ downsample_padding=downsample_padding)
42
+ elif down_block_type == "MCADownBlock2D":
43
+ if cross_attention_dim is None:
44
+ raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
45
+ return MCADownBlock2D(
46
+ num_layers=num_layers,
47
+ in_channels=in_channels,
48
+ out_channels=out_channels,
49
+ channel_attn=channel_attn,
50
+ temb_channels=temb_channels,
51
+ add_downsample=add_downsample,
52
+ resnet_eps=resnet_eps,
53
+ resnet_act_fn=resnet_act_fn,
54
+ resnet_groups=resnet_groups,
55
+ downsample_padding=downsample_padding,
56
+ cross_attention_dim=cross_attention_dim,
57
+ attn_num_head_channels=attn_num_head_channels,
58
+ content_channel=content_channel,
59
+ reduction=reduction)
60
+ else:
61
+ raise ValueError(f"{down_block_type} does not exist.")
62
+
63
+
64
+ def get_up_block(
65
+ up_block_type,
66
+ num_layers,
67
+ in_channels,
68
+ out_channels,
69
+ prev_output_channel,
70
+ temb_channels,
71
+ add_upsample,
72
+ resnet_eps,
73
+ resnet_act_fn,
74
+ attn_num_head_channels,
75
+ upblock_index,
76
+ resnet_groups=None,
77
+ cross_attention_dim=None,
78
+ structure_feature_begin=64):
79
+
80
+ up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
81
+ if up_block_type == "UpBlock2D":
82
+ return UpBlock2D(
83
+ num_layers=num_layers,
84
+ in_channels=in_channels,
85
+ out_channels=out_channels,
86
+ prev_output_channel=prev_output_channel,
87
+ temb_channels=temb_channels,
88
+ add_upsample=add_upsample,
89
+ resnet_eps=resnet_eps,
90
+ resnet_act_fn=resnet_act_fn,
91
+ resnet_groups=resnet_groups)
92
+ elif up_block_type == "StyleRSIUpBlock2D":
93
+ return StyleRSIUpBlock2D(
94
+ num_layers=num_layers,
95
+ in_channels=in_channels,
96
+ out_channels=out_channels,
97
+ prev_output_channel=prev_output_channel,
98
+ temb_channels=temb_channels,
99
+ add_upsample=add_upsample,
100
+ resnet_eps=resnet_eps,
101
+ resnet_act_fn=resnet_act_fn,
102
+ resnet_groups=resnet_groups,
103
+ cross_attention_dim=cross_attention_dim,
104
+ attn_num_head_channels=attn_num_head_channels,
105
+ structure_feature_begin=structure_feature_begin,
106
+ upblock_index=upblock_index)
107
+ else:
108
+ raise ValueError(f"{up_block_type} does not exist.")
109
+
110
+
111
+ class UNetMidMCABlock2D(nn.Module):
112
+ def __init__(
113
+ self,
114
+ in_channels: int,
115
+ temb_channels: int,
116
+ channel_attn: bool = False,
117
+ dropout: float = 0.0,
118
+ num_layers: int = 1,
119
+ resnet_eps: float = 1e-6,
120
+ resnet_time_scale_shift: str = "default",
121
+ resnet_act_fn: str = "swish",
122
+ resnet_groups: int = 32,
123
+ resnet_pre_norm: bool = True,
124
+ attn_num_head_channels=1,
125
+ attention_type="default",
126
+ output_scale_factor=1.0,
127
+ cross_attention_dim=1280,
128
+ content_channel=256,
129
+ reduction=32,
130
+ **kwargs,
131
+ ):
132
+ super().__init__()
133
+
134
+ self.attention_type = attention_type
135
+ self.attn_num_head_channels = attn_num_head_channels
136
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
137
+
138
+ resnets = [
139
+ ResnetBlock2D(
140
+ in_channels=in_channels,
141
+ out_channels=in_channels,
142
+ temb_channels=temb_channels,
143
+ eps=resnet_eps,
144
+ groups=resnet_groups,
145
+ dropout=dropout,
146
+ time_embedding_norm=resnet_time_scale_shift,
147
+ non_linearity=resnet_act_fn,
148
+ output_scale_factor=output_scale_factor,
149
+ pre_norm=resnet_pre_norm,
150
+ )
151
+ ]
152
+ content_attentions = []
153
+ style_attentions = []
154
+
155
+ for _ in range(num_layers):
156
+ content_attentions.append(
157
+ ChannelAttnBlock(
158
+ in_channels=in_channels + content_channel,
159
+ out_channels=in_channels,
160
+ non_linearity=resnet_act_fn,
161
+ channel_attn=channel_attn,
162
+ reduction=reduction,
163
+ )
164
+ )
165
+ style_attentions.append(
166
+ SpatialTransformer(
167
+ in_channels,
168
+ attn_num_head_channels,
169
+ in_channels // attn_num_head_channels,
170
+ depth=1,
171
+ context_dim=cross_attention_dim,
172
+ num_groups=resnet_groups,
173
+ )
174
+ )
175
+ resnets.append(
176
+ ResnetBlock2D(
177
+ in_channels=in_channels,
178
+ out_channels=in_channels,
179
+ temb_channels=temb_channels,
180
+ eps=resnet_eps,
181
+ groups=resnet_groups,
182
+ dropout=dropout,
183
+ time_embedding_norm=resnet_time_scale_shift,
184
+ non_linearity=resnet_act_fn,
185
+ output_scale_factor=output_scale_factor,
186
+ pre_norm=resnet_pre_norm,
187
+ )
188
+ )
189
+
190
+ self.content_attentions = nn.ModuleList(content_attentions)
191
+ self.style_attentions = nn.ModuleList(style_attentions)
192
+ self.resnets = nn.ModuleList(resnets)
193
+
194
+ def forward(
195
+ self,
196
+ hidden_states,
197
+ temb=None,
198
+ encoder_hidden_states=None,
199
+ index=None,
200
+ ):
201
+ hidden_states = self.resnets[0](hidden_states, temb)
202
+ for content_attn, style_attn, resnet in zip(self.content_attentions, self.style_attentions, self.resnets[1:]):
203
+
204
+ # content
205
+ current_content_feature = encoder_hidden_states[1][index]
206
+ hidden_states = content_attn(hidden_states, current_content_feature)
207
+
208
+ # t_embed
209
+ hidden_states = resnet(hidden_states, temb)
210
+
211
+ # style
212
+ current_style_feature = encoder_hidden_states[0]
213
+ batch_size, channel, height, width = current_style_feature.shape
214
+ current_style_feature = current_style_feature.permute(0, 2, 3, 1).reshape(batch_size, height*width, channel)
215
+ hidden_states = style_attn(hidden_states, context=current_style_feature)
216
+
217
+ return hidden_states
218
+
219
+
220
+ class MCADownBlock2D(nn.Module):
221
+ def __init__(
222
+ self,
223
+ in_channels: int,
224
+ out_channels: int,
225
+ temb_channels: int,
226
+ dropout: float = 0.0,
227
+ channel_attn: bool = False,
228
+ num_layers: int = 1,
229
+ resnet_eps: float = 1e-6,
230
+ resnet_time_scale_shift: str = "default",
231
+ resnet_act_fn: str = "swish",
232
+ resnet_groups: int = 32,
233
+ resnet_pre_norm: bool = True,
234
+ attn_num_head_channels=1,
235
+ cross_attention_dim=1280,
236
+ attention_type="default",
237
+ output_scale_factor=1.0,
238
+ downsample_padding=1,
239
+ add_downsample=True,
240
+ content_channel=16,
241
+ reduction=32,
242
+ ):
243
+ super().__init__()
244
+ content_attentions = []
245
+ resnets = []
246
+ style_attentions = []
247
+
248
+ self.attention_type = attention_type
249
+ self.attn_num_head_channels = attn_num_head_channels
250
+
251
+ for i in range(num_layers):
252
+ in_channels = in_channels if i == 0 else out_channels
253
+ content_attentions.append(
254
+ ChannelAttnBlock(
255
+ in_channels=in_channels+content_channel,
256
+ out_channels=in_channels,
257
+ groups=resnet_groups,
258
+ non_linearity=resnet_act_fn,
259
+ channel_attn=channel_attn,
260
+ reduction=reduction,
261
+ )
262
+ )
263
+ resnets.append(
264
+ ResnetBlock2D(
265
+ in_channels=in_channels,
266
+ out_channels=out_channels,
267
+ temb_channels=temb_channels,
268
+ eps=resnet_eps,
269
+ groups=resnet_groups,
270
+ dropout=dropout,
271
+ time_embedding_norm=resnet_time_scale_shift,
272
+ non_linearity=resnet_act_fn,
273
+ output_scale_factor=output_scale_factor,
274
+ pre_norm=resnet_pre_norm,
275
+ )
276
+ )
277
+ print("The style attention cross-attention dim in down block layer {} is {}".format(i+1, cross_attention_dim))
278
+ style_attentions.append(
279
+ SpatialTransformer(
280
+ out_channels,
281
+ attn_num_head_channels,
282
+ out_channels // attn_num_head_channels,
283
+ depth=1,
284
+ context_dim=cross_attention_dim,
285
+ num_groups=resnet_groups,
286
+ )
287
+ )
288
+ self.content_attentions = nn.ModuleList(content_attentions)
289
+ self.style_attentions = nn.ModuleList(style_attentions)
290
+ self.resnets = nn.ModuleList(resnets)
291
+
292
+ if num_layers == 1:
293
+ in_channels = out_channels
294
+ if add_downsample:
295
+ self.downsamplers = nn.ModuleList(
296
+ [
297
+ Downsample2D(
298
+ in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
299
+ )
300
+ ]
301
+ )
302
+ else:
303
+ self.downsamplers = None
304
+
305
+ self.gradient_checkpointing = False
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states,
310
+ index,
311
+ temb=None,
312
+ encoder_hidden_states=None
313
+ ):
314
+ output_states = ()
315
+
316
+ for content_attn, resnet, style_attn in zip(self.content_attentions, self.resnets, self.style_attentions):
317
+
318
+ # content
319
+ current_content_feature = encoder_hidden_states[1][index]
320
+ hidden_states = content_attn(hidden_states, current_content_feature)
321
+
322
+ # t_embed
323
+ hidden_states = resnet(hidden_states, temb)
324
+
325
+ # style
326
+ current_style_feature = encoder_hidden_states[0]
327
+ batch_size, channel, height, width = current_style_feature.shape
328
+ current_style_feature = current_style_feature.permute(0, 2, 3, 1).reshape(batch_size, height*width, channel)
329
+ hidden_states = style_attn(hidden_states, context=current_style_feature)
330
+
331
+ output_states += (hidden_states,)
332
+
333
+ if self.downsamplers is not None:
334
+ for downsampler in self.downsamplers:
335
+ hidden_states = downsampler(hidden_states)
336
+
337
+ output_states += (hidden_states,)
338
+
339
+ return hidden_states, output_states
340
+
341
+
342
+ class DownBlock2D(nn.Module):
343
+ def __init__(
344
+ self,
345
+ in_channels: int,
346
+ out_channels: int,
347
+ temb_channels: int,
348
+ dropout: float = 0.0,
349
+ num_layers: int = 1,
350
+ resnet_eps: float = 1e-6,
351
+ resnet_time_scale_shift: str = "default",
352
+ resnet_act_fn: str = "swish",
353
+ resnet_groups: int = 32,
354
+ resnet_pre_norm: bool = True,
355
+ output_scale_factor=1.0,
356
+ add_downsample=True,
357
+ downsample_padding=1,
358
+ ):
359
+ super().__init__()
360
+ resnets = []
361
+
362
+ for i in range(num_layers):
363
+ in_channels = in_channels if i == 0 else out_channels
364
+ resnets.append(
365
+ ResnetBlock2D(
366
+ in_channels=in_channels,
367
+ out_channels=out_channels,
368
+ temb_channels=temb_channels,
369
+ eps=resnet_eps,
370
+ groups=resnet_groups,
371
+ dropout=dropout,
372
+ time_embedding_norm=resnet_time_scale_shift,
373
+ non_linearity=resnet_act_fn,
374
+ output_scale_factor=output_scale_factor,
375
+ pre_norm=resnet_pre_norm,
376
+ )
377
+ )
378
+
379
+ self.resnets = nn.ModuleList(resnets)
380
+
381
+ if num_layers == 1:
382
+ in_channels = out_channels
383
+ if add_downsample:
384
+ self.downsamplers = nn.ModuleList(
385
+ [
386
+ Downsample2D(
387
+ in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
388
+ )
389
+ ]
390
+ )
391
+ else:
392
+ self.downsamplers = None
393
+
394
+ self.gradient_checkpointing = False
395
+
396
+ def forward(self, hidden_states, temb=None):
397
+ output_states = ()
398
+
399
+ for resnet in self.resnets:
400
+ if self.training and self.gradient_checkpointing:
401
+
402
+ def create_custom_forward(module):
403
+ def custom_forward(*inputs):
404
+ return module(*inputs)
405
+
406
+ return custom_forward
407
+
408
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
409
+ else:
410
+ hidden_states = resnet(hidden_states, temb)
411
+
412
+ output_states += (hidden_states,)
413
+
414
+ if self.downsamplers is not None:
415
+ for downsampler in self.downsamplers:
416
+ hidden_states = downsampler(hidden_states)
417
+
418
+ output_states += (hidden_states,)
419
+
420
+ return hidden_states, output_states
421
+
422
+
423
+ class StyleRSIUpBlock2D(nn.Module):
424
+ def __init__(
425
+ self,
426
+ in_channels: int,
427
+ out_channels: int,
428
+ prev_output_channel: int,
429
+ temb_channels: int,
430
+ dropout: float = 0.0,
431
+ num_layers: int = 1,
432
+ resnet_eps: float = 1e-6,
433
+ resnet_time_scale_shift: str = "default",
434
+ resnet_act_fn: str = "swish",
435
+ resnet_groups: int = 32,
436
+ resnet_pre_norm: bool = True,
437
+ attn_num_head_channels=1,
438
+ cross_attention_dim=1280,
439
+ attention_type="default",
440
+ output_scale_factor=1.0,
441
+ downsample_padding=1,
442
+ structure_feature_begin=64,
443
+ upblock_index=1,
444
+ add_upsample=True,
445
+ ):
446
+ super().__init__()
447
+ resnets = []
448
+ attentions = []
449
+ sc_interpreter_offsets = []
450
+ dcn_deforms = []
451
+
452
+ self.attention_type = attention_type
453
+ self.attn_num_head_channels = attn_num_head_channels
454
+ self.upblock_index = upblock_index
455
+
456
+ for i in range(num_layers):
457
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
458
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
459
+
460
+ sc_interpreter_offsets.append(
461
+ OffsetRefStrucInter(
462
+ res_in_channels=res_skip_channels,
463
+ style_feat_in_channels=int(structure_feature_begin * 2 / upblock_index),
464
+ n_heads=attn_num_head_channels,
465
+ num_groups=resnet_groups,
466
+ )
467
+ )
468
+ dcn_deforms.append(
469
+ DeformConv2d(
470
+ in_channels=res_skip_channels,
471
+ out_channels=res_skip_channels,
472
+ kernel_size=(3, 3),
473
+ stride=1,
474
+ padding=1,
475
+ dilation=1,
476
+ )
477
+ )
478
+
479
+ resnets.append(
480
+ ResnetBlock2D(
481
+ in_channels=resnet_in_channels + res_skip_channels,
482
+ out_channels=out_channels,
483
+ temb_channels=temb_channels,
484
+ eps=resnet_eps,
485
+ groups=resnet_groups,
486
+ dropout=dropout,
487
+ time_embedding_norm=resnet_time_scale_shift,
488
+ non_linearity=resnet_act_fn,
489
+ output_scale_factor=output_scale_factor,
490
+ pre_norm=resnet_pre_norm,
491
+ )
492
+ )
493
+ attentions.append(
494
+ SpatialTransformer(
495
+ out_channels,
496
+ attn_num_head_channels,
497
+ out_channels // attn_num_head_channels,
498
+ depth=1,
499
+ context_dim=cross_attention_dim,
500
+ num_groups=resnet_groups,
501
+ )
502
+ )
503
+ self.sc_interpreter_offsets = nn.ModuleList(sc_interpreter_offsets)
504
+ self.dcn_deforms = nn.ModuleList(dcn_deforms)
505
+ self.attentions = nn.ModuleList(attentions)
506
+ self.resnets = nn.ModuleList(resnets)
507
+
508
+ self.num_layers = num_layers
509
+
510
+ if add_upsample:
511
+ self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
512
+ else:
513
+ self.upsamplers = None
514
+
515
+ self.gradient_checkpointing = False
516
+
517
+ def set_attention_slice(self, slice_size):
518
+ if slice_size is not None and self.attn_num_head_channels % slice_size != 0:
519
+ raise ValueError(
520
+ f"Make sure slice_size {slice_size} is a divisor of "
521
+ f"the number of heads used in cross_attention {self.attn_num_head_channels}"
522
+ )
523
+ if slice_size is not None and slice_size > self.attn_num_head_channels:
524
+ raise ValueError(
525
+ f"Chunk_size {slice_size} has to be smaller or equal to "
526
+ f"the number of heads used in cross_attention {self.attn_num_head_channels}"
527
+ )
528
+
529
+ for attn in self.attentions:
530
+ attn._set_attention_slice(slice_size)
531
+
532
+ self.gradient_checkpointing = False
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states,
537
+ res_hidden_states_tuple,
538
+ style_structure_features,
539
+ temb=None,
540
+ encoder_hidden_states=None,
541
+ upsample_size=None,
542
+ ):
543
+ total_offset = 0
544
+
545
+ style_content_feat = style_structure_features[-self.upblock_index-2]
546
+
547
+ for i, (sc_inter_offset, dcn_deform, resnet, attn) in \
548
+ enumerate(zip(self.sc_interpreter_offsets, self.dcn_deforms, self.resnets, self.attentions)):
549
+ # pop res hidden states
550
+ res_hidden_states = res_hidden_states_tuple[-1]
551
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
552
+
553
+ # Skip Style Content Interpreter by DCN
554
+ offset = sc_inter_offset(res_hidden_states, style_content_feat)
555
+ offset = offset.contiguous()
556
+ # offset sum
557
+ offset_sum = torch.mean(torch.abs(offset))
558
+ total_offset += offset_sum
559
+
560
+ res_hidden_states = res_hidden_states.contiguous()
561
+ res_hidden_states = dcn_deform(res_hidden_states, offset)
562
+ # concat as input
563
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
564
+
565
+ if self.training and self.gradient_checkpointing:
566
+
567
+ def create_custom_forward(module):
568
+ def custom_forward(*inputs):
569
+ return module(*inputs)
570
+
571
+ return custom_forward
572
+
573
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
574
+ hidden_states = torch.utils.checkpoint.checkpoint(
575
+ create_custom_forward(attn), hidden_states, encoder_hidden_states
576
+ )
577
+ else:
578
+ hidden_states = resnet(hidden_states, temb)
579
+ hidden_states = attn(hidden_states, context=encoder_hidden_states)
580
+
581
+ if self.upsamplers is not None:
582
+ for upsampler in self.upsamplers:
583
+ hidden_states = upsampler(hidden_states, upsample_size)
584
+
585
+ offset_out = total_offset / self.num_layers
586
+
587
+ return hidden_states, offset_out
588
+
589
+
590
+ class UpBlock2D(nn.Module):
591
+ def __init__(
592
+ self,
593
+ in_channels: int,
594
+ prev_output_channel: int,
595
+ out_channels: int,
596
+ temb_channels: int,
597
+ dropout: float = 0.0,
598
+ num_layers: int = 1,
599
+ resnet_eps: float = 1e-6,
600
+ resnet_time_scale_shift: str = "default",
601
+ resnet_act_fn: str = "swish",
602
+ resnet_groups: int = 32,
603
+ resnet_pre_norm: bool = True,
604
+ output_scale_factor=1.0,
605
+ add_upsample=True,
606
+ ):
607
+ super().__init__()
608
+ resnets = []
609
+
610
+ for i in range(num_layers):
611
+ res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
612
+ resnet_in_channels = prev_output_channel if i == 0 else out_channels
613
+
614
+ resnets.append(
615
+ ResnetBlock2D(
616
+ in_channels=resnet_in_channels + res_skip_channels,
617
+ out_channels=out_channels,
618
+ temb_channels=temb_channels,
619
+ eps=resnet_eps,
620
+ groups=resnet_groups,
621
+ dropout=dropout,
622
+ time_embedding_norm=resnet_time_scale_shift,
623
+ non_linearity=resnet_act_fn,
624
+ output_scale_factor=output_scale_factor,
625
+ pre_norm=resnet_pre_norm,
626
+ )
627
+ )
628
+
629
+ self.resnets = nn.ModuleList(resnets)
630
+
631
+ if add_upsample:
632
+ self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
633
+ else:
634
+ self.upsamplers = None
635
+
636
+ self.gradient_checkpointing = False
637
+
638
+ def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
639
+ for resnet in self.resnets:
640
+ # pop res hidden states
641
+ res_hidden_states = res_hidden_states_tuple[-1]
642
+ res_hidden_states_tuple = res_hidden_states_tuple[:-1]
643
+ hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
644
+
645
+ if self.training and self.gradient_checkpointing:
646
+
647
+ def create_custom_forward(module):
648
+ def custom_forward(*inputs):
649
+ return module(*inputs)
650
+
651
+ return custom_forward
652
+
653
+ hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
654
+ else:
655
+ hidden_states = resnet(hidden_states, temb)
656
+
657
+ if self.upsamplers is not None:
658
+ for upsampler in self.upsamplers:
659
+ hidden_states = upsampler(hidden_states, upsample_size)
660
+
661
+ return hidden_states
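
In StyleRSIUpBlock2D above, OffsetRefStrucInter predicts an offset field that is passed to torchvision's DeformConv2d together with the skip feature. The standalone sketch below (hypothetical channel count and spatial size, not the block's real feature maps) shows the offset layout that call expects:

    # Hedged sketch of the deformable convolution applied to the skip features above.
    import torch
    from torchvision.ops import DeformConv2d

    channels, height, width = 64, 32, 32       # hypothetical skip-feature shape
    dcn = DeformConv2d(channels, channels, kernel_size=(3, 3), stride=1, padding=1, dilation=1)

    feat = torch.randn(2, channels, height, width)
    # For a 3x3 kernel with a single offset group, DeformConv2d expects
    # 2 * 3 * 3 = 18 offset channels: a 2-D shift for every kernel tap.
    offset = torch.randn(2, 18, height, width).contiguous()

    out = dcn(feat, offset)
    print(out.shape)                           # torch.Size([2, 64, 32, 32])
    # The up block also tracks the mean absolute offset, analogous to offset_sum above:
    print(torch.mean(torch.abs(offset)))
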
ttf/KaiXinSongA.ttf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e11c8d15dcef64e5b55548e5764442d1b1f3be6fc52346f1338af9b48cf19bd
3
+ size 10220244
ttf/KaiXinSongB.ttf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da7bec78a819495232d286244fe0c1f95d147e84811b80ece047169c57cd4a45
3
+ size 27296536
utils.py ADDED
@@ -0,0 +1,123 @@
1
+ import os
2
+ import cv2
3
+ import yaml
4
+ import copy
5
+ import pygame.freetype
6
+ import numpy as np
7
+ from PIL import Image
8
+ from fontTools.ttLib import TTFont
9
+
10
+ import torch
11
+ import torchvision.transforms as transforms
12
+
13
+ def save_args_to_yaml(args, output_file):
14
+ # Convert args namespace to a dictionary
15
+ args_dict = vars(args)
16
+
17
+ # Write the dictionary to a YAML file
18
+ with open(output_file, 'w') as yaml_file:
19
+ yaml.dump(args_dict, yaml_file, default_flow_style=False)
20
+
21
+
22
+ def save_single_image(save_dir, image):
23
+
24
+ save_path = f"{save_dir}/out_single.png"
25
+ image.save(save_path)
26
+
27
+
28
+ def save_image_with_content_style(save_dir, image, content_image_pil, content_image_path, style_image_path, resolution):
29
+
30
+ new_image = Image.new('RGB', (resolution*3, resolution))
31
+ if content_image_pil is not None:
32
+ content_image = content_image_pil
33
+ else:
34
+ content_image = Image.open(content_image_path).convert("RGB").resize((resolution, resolution), Image.BILINEAR)
35
+ style_image = Image.open(style_image_path).convert("RGB").resize((resolution, resolution), Image.BILINEAR)
36
+
37
+ new_image.paste(content_image, (0, 0))
38
+ new_image.paste(style_image, (resolution, 0))
39
+ new_image.paste(image, (resolution*2, 0))
40
+
41
+ save_path = f"{save_dir}/out_with_cs.jpg"
42
+ new_image.save(save_path)
43
+
44
+
45
+ def x0_from_epsilon(scheduler, noise_pred, x_t, timesteps):
46
+ """Return the predicted x_0 recovered from epsilon (the predicted noise).
47
+ """
48
+ batch_size = noise_pred.shape[0]
49
+ for i in range(batch_size):
50
+ noise_pred_i = noise_pred[i]
51
+ noise_pred_i = noise_pred_i[None, :]
52
+ t = timesteps[i]
53
+ x_t_i = x_t[i]
54
+ x_t_i = x_t_i[None, :]
55
+
56
+ pred_original_sample_i = scheduler.step(
57
+ model_output=noise_pred_i,
58
+ timestep=t,
59
+ sample=x_t_i,
60
+ # predict_epsilon=True,
61
+ generator=None,
62
+ return_dict=True,
63
+ ).pred_original_sample
64
+ if i == 0:
65
+ pred_original_sample = pred_original_sample_i
66
+ else:
67
+ pred_original_sample = torch.cat((pred_original_sample, pred_original_sample_i), dim=0)
68
+
69
+ return pred_original_sample
70
+
71
+
72
+ def reNormalize_img(pred_original_sample):
73
+ pred_original_sample = (pred_original_sample / 2 + 0.5).clamp(0, 1)
74
+
75
+ return pred_original_sample
76
+
77
+
78
+ def normalize_mean_std(image):
79
+ transforms_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
80
+ image = transforms_norm(image)
81
+
82
+ return image
83
+
84
+
85
+ def is_char_in_font(font_path, char):
86
+ TTFont_font = TTFont(font_path)
87
+ cmap = TTFont_font['cmap']
88
+ for subtable in cmap.tables:
89
+ if ord(char) in subtable.cmap:
90
+ return True
91
+ return False
92
+
93
+
94
+ def load_ttf(ttf_path, fsize=128):
95
+ pygame.init()
96
+
97
+ font = pygame.freetype.Font(ttf_path, size=fsize)
98
+ return font
99
+
100
+
101
+ def ttf2im(font, char, fsize=128):
102
+
103
+ try:
104
+ surface, _ = font.render(char)
105
+ except Exception:
106
+ print("No glyph for char {}".format(char))
107
+ return
108
+ bg = np.full((fsize, fsize), 255)
109
+ imo = pygame.surfarray.pixels_alpha(surface).transpose(1, 0)
110
+ imo = 255 - np.array(Image.fromarray(imo))
111
+ im = copy.deepcopy(bg)
112
+ h, w = imo.shape[:2]
113
+ if h > fsize:
114
+ h, w = fsize, round(w*fsize/h)
115
+ imo = cv2.resize(imo, (w, h))
116
+ if w > fsize:
117
+ h, w = round(h*fsize/w), fsize
118
+ imo = cv2.resize(imo, (w, h))
119
+ x, y = round((fsize-w)/2), round((fsize-h)/2)
120
+ im[y:h+y, x:x+w] = imo
121
+ pil_im = Image.fromarray(im.astype('uint8')).convert('RGB')
122
+
123
+ return pil_im
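
A short usage sketch for the font helpers above. It assumes the bundled ttf/KaiXinSongA.ttf, a working pygame install, and that it is run from the repo root; the output filename is arbitrary:

    # Hedged usage sketch for is_char_in_font / load_ttf / ttf2im defined above.
    from utils import is_char_in_font, load_ttf, ttf2im

    ttf_path = "ttf/KaiXinSongA.ttf"   # font shipped in this repo
    char = "龍"                        # any character to render

    if is_char_in_font(font_path=ttf_path, char=char):
        font = load_ttf(ttf_path, fsize=128)
        pil_image = ttf2im(font, char, fsize=128)   # 128x128 RGB image, dark glyph on white
        pil_image.save("content_char.png")          # arbitrary output path
    else:
        print(f"{char} is not covered by {ttf_path}")
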