peter850421 committed on
Commit
e1ebf71
1 Parent(s): 2ea7ac8

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +49 -0
  2. LICENSE +218 -0
  3. README.md +147 -7
  4. __pycache__/drag_pipeline.cpython-38.pyc +0 -0
  5. drag_pipeline.py +493 -0
  6. drag_ui.py +335 -0
  7. environment.yaml +48 -0
  8. local_pretrained_models/dummy.txt +1 -0
  9. lora/lora_ckpt/dummy.txt +1 -0
  10. lora/samples/cat_dog/andrew-s-ouo1hbizWwo-unsplash.jpg +0 -0
  11. lora/samples/oilpaint1/catherine-kay-greenup-6rhUen8Wrao-unsplash.jpg +0 -0
  12. lora/samples/oilpaint2/birmingham-museums-trust-wKlHsooRVbg-unsplash.jpg +0 -0
  13. lora/samples/prompts.txt +6 -0
  14. lora/samples/sculpture/evan-lee-EdAVNRvUVH4-unsplash.jpg +0 -0
  15. lora/train_dreambooth_lora.py +1324 -0
  16. lora/train_lora.sh +21 -0
  17. lora_tmp/pytorch_lora_weights.bin +3 -0
  18. release-doc/asset/accelerate_config.jpg +0 -0
  19. release-doc/asset/github_video.gif +3 -0
  20. release-doc/licenses/LICENSE-lora.txt +201 -0
  21. results/2023-12-01-2318-20.png +0 -0
  22. results/2023-12-01-2319-14.png +0 -0
  23. results/2023-12-01-2320-47.png +0 -0
  24. results/2023-12-01-2321-38.png +0 -0
  25. results/2023-12-01-2322-25.png +0 -0
  26. results/2023-12-01-2324-23.png +0 -0
  27. results/2023-12-01-2326-06.png +0 -0
  28. results/2023-12-01-2328-23.png +0 -0
  29. results/2023-12-01-2329-06.png +0 -0
  30. results/2023-12-01-2330-14.png +0 -0
  31. results/2023-12-01-2331-09.png +0 -0
  32. results/2023-12-01-2331-41.png +0 -0
  33. results/2023-12-01-2332-17.png +0 -0
  34. results/2023-12-01-2336-40.png +0 -0
  35. results/2023-12-01-2338-51.png +3 -0
  36. results/2023-12-01-2340-40.png +3 -0
  37. results/2023-12-01-2342-40.png +0 -0
  38. results/2023-12-01-2349-09.png +3 -0
  39. results/2023-12-01-2350-12.png +3 -0
  40. results/2023-12-01-2353-51.png +3 -0
  41. results/2023-12-01-2355-54.png +3 -0
  42. results/2023-12-01-2357-39.png +3 -0
  43. results/2023-12-02-0000-23.png +3 -0
  44. results/2023-12-02-0002-02.png +3 -0
  45. results/2023-12-02-0004-46.png +0 -0
  46. results/2023-12-05-1935-28.png +3 -0
  47. results/2023-12-05-1936-51.png +3 -0
  48. results/2023-12-05-1937-52.png +3 -0
  49. results/2023-12-05-1939-28.png +3 -0
  50. results/2023-12-05-1944-37.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,52 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ release-doc/asset/github_video.gif filter=lfs diff=lfs merge=lfs -text
37
+ results/2023-12-01-2338-51.png filter=lfs diff=lfs merge=lfs -text
38
+ results/2023-12-01-2340-40.png filter=lfs diff=lfs merge=lfs -text
39
+ results/2023-12-01-2349-09.png filter=lfs diff=lfs merge=lfs -text
40
+ results/2023-12-01-2350-12.png filter=lfs diff=lfs merge=lfs -text
41
+ results/2023-12-01-2353-51.png filter=lfs diff=lfs merge=lfs -text
42
+ results/2023-12-01-2355-54.png filter=lfs diff=lfs merge=lfs -text
43
+ results/2023-12-01-2357-39.png filter=lfs diff=lfs merge=lfs -text
44
+ results/2023-12-02-0000-23.png filter=lfs diff=lfs merge=lfs -text
45
+ results/2023-12-02-0002-02.png filter=lfs diff=lfs merge=lfs -text
46
+ results/2023-12-05-1935-28.png filter=lfs diff=lfs merge=lfs -text
47
+ results/2023-12-05-1936-51.png filter=lfs diff=lfs merge=lfs -text
48
+ results/2023-12-05-1937-52.png filter=lfs diff=lfs merge=lfs -text
49
+ results/2023-12-05-1939-28.png filter=lfs diff=lfs merge=lfs -text
50
+ results/2023-12-05-1951-55.png filter=lfs diff=lfs merge=lfs -text
51
+ results/2023-12-05-2007-38.png filter=lfs diff=lfs merge=lfs -text
52
+ results/2023-12-05-2020-44.png filter=lfs diff=lfs merge=lfs -text
53
+ results/2023-12-05-2024-00.gif filter=lfs diff=lfs merge=lfs -text
54
+ results/2023-12-05-2024-01.png filter=lfs diff=lfs merge=lfs -text
55
+ results/2023-12-05-2026-48.gif filter=lfs diff=lfs merge=lfs -text
56
+ results/2023-12-05-2026-50.png filter=lfs diff=lfs merge=lfs -text
57
+ results/2023-12-05-2037-28.gif filter=lfs diff=lfs merge=lfs -text
58
+ results/2023-12-05-2042-05.gif filter=lfs diff=lfs merge=lfs -text
59
+ results/2023-12-05-2047-11.gif filter=lfs diff=lfs merge=lfs -text
60
+ results/2023-12-05-2047-13.png filter=lfs diff=lfs merge=lfs -text
61
+ results/2023-12-05-2050-26.gif filter=lfs diff=lfs merge=lfs -text
62
+ results/2023-12-08-0124-52.png filter=lfs diff=lfs merge=lfs -text
63
+ results/2023-12-08-0136-07.png filter=lfs diff=lfs merge=lfs -text
64
+ results/2023-12-08-0143-46.png filter=lfs diff=lfs merge=lfs -text
65
+ results/2023-12-08-0146-41.gif filter=lfs diff=lfs merge=lfs -text
66
+ results/2023-12-08-0146-45.png filter=lfs diff=lfs merge=lfs -text
67
+ results/2023-12-08-0149-29.png filter=lfs diff=lfs merge=lfs -text
68
+ results/2023-12-08-0152-29.png filter=lfs diff=lfs merge=lfs -text
69
+ results/2023-12-08-0153-19.png filter=lfs diff=lfs merge=lfs -text
70
+ results/2023-12-08-0154-20.png filter=lfs diff=lfs merge=lfs -text
71
+ results/2023-12-08-0155-38.png filter=lfs diff=lfs merge=lfs -text
72
+ results/2023-12-08-0156-15.png filter=lfs diff=lfs merge=lfs -text
73
+ results/2023-12-08-0156-34.png filter=lfs diff=lfs merge=lfs -text
74
+ results/2023-12-08-0157-09.png filter=lfs diff=lfs merge=lfs -text
75
+ results/2023-12-08-0157-52.png filter=lfs diff=lfs merge=lfs -text
76
+ results/2023-12-08-0159-25.png filter=lfs diff=lfs merge=lfs -text
77
+ results/2023-12-08-0200-31.gif filter=lfs diff=lfs merge=lfs -text
78
+ results/2023-12-08-0200-33.png filter=lfs diff=lfs merge=lfs -text
79
+ results/2023-12-08-0202-12.gif filter=lfs diff=lfs merge=lfs -text
80
+ results/2023-12-08-0202-13.png filter=lfs diff=lfs merge=lfs -text
81
+ results/2023-12-08-0215-08.gif filter=lfs diff=lfs merge=lfs -text
82
+ results/2023-12-08-0217-26.gif filter=lfs diff=lfs merge=lfs -text
83
+ results/2023-12-08-0219-21.gif filter=lfs diff=lfs merge=lfs -text
84
+ results/2023-12-08-0223-15.gif filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,218 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "{}"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright {yyyy} {name of copyright owner}
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
202
+
203
+ =======================================================================
204
+ Apache DragDiffusion Subcomponents:
205
+
206
+ The Apache DragDiffusion project contains subcomponents with separate copyright
207
+ notices and license terms. Your use of the source code for these
208
+ subcomponents is subject to the terms and conditions of the following
209
+ licenses.
210
+
211
+ ========================================================================
212
+ Apache 2.0 licenses
213
+ ========================================================================
214
+
215
+ The following components are provided under the Apache License. See project link for details.
216
+ The text of each license is the standard Apache 2.0 license.
217
+
218
+ Files from lora: https://github.com/huggingface/diffusers/blob/v0.17.1/examples/dreambooth/train_dreambooth_lora.py (Apache 2.0)
README.md CHANGED
@@ -1,12 +1,152 @@
1
  ---
2
  title: DragDiffusion
3
- emoji: 🦀
4
- colorFrom: indigo
5
- colorTo: purple
6
  sdk: gradio
7
- sdk_version: 4.8.0
8
- app_file: app.py
9
- pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: DragDiffusion
3
+ app_file: drag_ui.py
4
  sdk: gradio
5
+ sdk_version: 3.41.1
6
  ---
7
+ <p align="center">
8
+ <h1 align="center">DragDiffusion: Harnessing Diffusion Models for Interactive Point-based Image Editing</h1>
9
+ <p align="center">
10
+ <a href="https://yujun-shi.github.io/"><strong>Yujun Shi</strong></a>
11
+ &nbsp;&nbsp;
12
+ <strong>Chuhui Xue</strong>
13
+ &nbsp;&nbsp;
14
+ <strong>Jiachun Pan</strong>
15
+ &nbsp;&nbsp;
16
+ <strong>Wenqing Zhang</strong>
17
+ &nbsp;&nbsp;
18
+ <a href="https://vyftan.github.io/"><strong>Vincent Y. F. Tan</strong></a>
19
+ &nbsp;&nbsp;
20
+ <a href="https://songbai.site/"><strong>Song Bai</strong></a>
21
+ </p>
22
+ <div align="center">
23
+ <img src="./release-doc/asset/github_video.gif", width="700">
24
+ </div>
25
+ <br>
26
+ <p align="center">
27
+ <a href="https://arxiv.org/abs/2306.14435"><img alt='arXiv' src="https://img.shields.io/badge/arXiv-2306.14435-b31b1b.svg"></a>
28
+ <a href="https://yujun-shi.github.io/projects/dragdiffusion.html"><img alt='page' src="https://img.shields.io/badge/Project-Website-orange"></a>
29
+ <a href="https://twitter.com/YujunPeiyangShi"><img alt='Twitter' src="https://img.shields.io/twitter/follow/YujunPeiyangShi?label=%40YujunPeiyangShi"></a>
30
+ </p>
31
+ <br>
32
+ </p>
33
+
34
+ ## Disclaimer
35
+ This is a research project, NOT a commercial product.
36
+
37
+ ## News and Update
38
+ * [Sept 3rd] v0.1.0 Release.
39
+ * Enable **Dragging Diffusion-Generated Images.**
40
+ * Introducing a new guidance mechanism that **greatly improves the quality of dragging results.** (Inspired by [MasaCtrl](https://ljzycmd.github.io/projects/MasaCtrl/))
41
+ * Enable Dragging Images with arbitrary aspect ratio
42
+ * Adding support for DPM++Solver (Generated Images)
43
+ * [July 18th] v0.0.1 Release.
44
+ * Integrate LoRA training into the User Interface. **No need to use a training script; everything can be conveniently done in the UI!**
45
+ * Optimize User Interface layout.
46
+ * Enable using better VAE for eyes and faces (See [this](https://stable-diffusion-art.com/how-to-use-vae/))
47
+ * [July 8th] v0.0.0 Release.
48
+ * Implement Basic function of DragDiffusion
49
+
50
+ ## Installation
51
+
52
+ It is recommended to run our code on an NVIDIA GPU with a Linux system. We have not yet tested other configurations. Currently, our method requires around 14 GB of GPU memory to run. We will continue to optimize memory efficiency.
53
+
54
+ To install the required libraries, simply run the following commands:
55
+ ```
56
+ conda env create -f environment.yaml
57
+ conda activate dragdiff
58
+ ```
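+ 
+ After activating the environment, you may optionally verify that PyTorch can see your GPU, given the ~14 GB memory requirement mentioned above:
+ ```python
+ # Optional sanity check: confirm that a CUDA GPU is visible before launching the UI.
+ import torch
+ 
+ if torch.cuda.is_available():
+     props = torch.cuda.get_device_properties(0)
+     print(f"GPU: {props.name}, {props.total_memory / 1024**3:.1f} GB")
+ else:
+     print("No CUDA GPU detected; our method is only tested on NVIDIA GPUs.")
+ ```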
59
+
60
+ ## Run DragDiffusion
61
+ To start, run the following in the command line to launch the Gradio user interface:
62
+ ```
63
+ python3 drag_ui.py
64
+ ```
65
+
66
+ You may check our [GIF above](https://github.com/Yujun-Shi/DragDiffusion/blob/main/release-doc/asset/github_video.gif), which demonstrates the usage of the UI in a step-by-step manner.
67
+
68
+ Basically, it consists of the following steps:
69
+
70
+ #### Step 1: train a LoRA
71
+ 1) Drop your input image into the left-most box.
72
+ 2) Input a prompt describing the image in the "prompt" field.
73
+ 3) Click the "Train LoRA" button to train a LoRA given the input image
74
+
75
+ #### Step 2: do "drag" editing
76
+ 1) Draw a mask in the left-most box to specify the editable areas.
77
+ 2) Click handle and target points in the middle box. Also, you may reset all points by clicking "Undo point".
78
+ 3) Click the "Run" button to run our algorithm. Edited results will be displayed in the right-most box.
79
+
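+ If you prefer to script the core pipeline instead of using the UI, below is a minimal sketch of the DDIM inversion stage using the `DragPipeline` class from `drag_pipeline.py` in this repository. The image path, prompt, and scheduler settings are illustrative assumptions; the point-dragging optimization itself is handled by the UI utilities and is not shown here.
+ 
+ ```python
+ # Minimal sketch (not the full editing loop): deterministic DDIM inversion with DragPipeline.
+ import torch
+ import numpy as np
+ from PIL import Image
+ from diffusers import DDIMScheduler
+ from drag_pipeline import DragPipeline
+ 
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012,
+                           beta_schedule="scaled_linear",
+                           clip_sample=False, set_alpha_to_one=False)
+ pipe = DragPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
+                                     scheduler=scheduler).to(device)
+ pipe.modify_unet_forward()  # required so the UNet can return intermediate features
+ 
+ # Load an example image and normalize it to [-1, 1], shape (1, 3, H, W)
+ img = Image.open("lora/samples/cat_dog/andrew-s-ouo1hbizWwo-unsplash.jpg")
+ img = img.convert("RGB").resize((512, 512))
+ img = torch.from_numpy(np.array(img)).float().permute(2, 0, 1).unsqueeze(0)
+ img = (img / 127.5 - 1).to(device)
+ 
+ prompt = "a photo of a dog and a cat"  # should describe the input image
+ # 38 = round(0.75 * 50), matching the default inversion strength in the UI
+ inverted_latents = pipe.invert(img, prompt,
+                                num_inference_steps=50,
+                                num_actual_inference_steps=38,
+                                guidance_scale=1.0)
+ ```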
80
+
81
+ ## Explanation of parameters in the user interface
82
+ #### General Parameters
83
+ |Parameter|Explanation|
84
+ |-----|------|
85
+ |prompt|The prompt describing the user input image (This will be used to train the LoRA and conduct "drag" editing).|
86
+ |lora_path|The directory where the trained LoRA will be saved.|
87
+
88
+
89
+ #### Algorithm Parameters
90
+ These parameters are collapsed by default as we normally do not have to tune them. Here are the explanations:
91
+ * Base Model Config
92
+
93
+ |Parameter|Explanation|
94
+ |-----|------|
95
+ |Diffusion Model Path|The path to the diffusion model. By default we use "runwayml/stable-diffusion-v1-5". We will add support for more models in the future.|
96
+ |VAE Choice|The choice of VAE. There are currently two options: "default", which uses the original VAE, and "stabilityai/sd-vae-ft-mse", which can improve results on images with human eyes and faces (see [explanation](https://stable-diffusion-art.com/how-to-use-vae/) and the sketch below this table).|
97
+
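+ For reference, a short sketch of how the "stabilityai/sd-vae-ft-mse" option could be applied programmatically (an illustrative assumption; in the UI this is handled by the VAE dropdown):
+ 
+ ```python
+ # Hypothetical illustration of the VAE choice: swap the pipeline's VAE after loading.
+ import torch
+ from diffusers import AutoencoderKL, StableDiffusionPipeline
+ 
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+ pipe.vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
+ pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+ ```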
98
+ * Drag Parameters
99
+
100
+ |Parameter|Explanation|
101
+ |-----|------|
102
+ |n_pix_step|Maximum number of steps of motion supervision. **Increase this if handle points have not been "dragged" to the desired position.**|
103
+ |lam|The regularization coefficient controlling how strongly the unmasked region stays unchanged. Increase this value if the unmasked region changes more than desired (do not have to tune in most cases).|
104
+ |n_actual_inference_step|Number of DDIM inversion steps performed; in the UI this is derived from the inversion strength slider (see the note below; do not have to tune in most cases).|
105
+
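+ Note: in the UI, the number of DDIM inversion steps is not set directly; it is derived from the "inversion strength" slider. A plausible mapping (an assumption based on the slider description in `drag_ui.py`, not a quote of the actual utility code) is shown below.
+ 
+ ```python
+ # Hypothetical mapping from the UI slider to the number of DDIM inversion steps.
+ n_inference_step = 50        # "Total Sampling Steps" in the UI
+ inversion_strength = 0.75    # default slider value
+ n_actual_inference_step = round(inversion_strength * n_inference_step)
+ print(n_actual_inference_step)  # 38 with the defaults above
+ ```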
106
+ * LoRA Parameters
107
+
108
+ |Parameter|Explanation|
109
+ |-----|------|
110
+ |LoRA training steps|Number of LoRA training steps (do not have to tune in most cases).|
111
+ |LoRA learning rate|Learning rate of the LoRA (do not have to tune in most cases).|
112
+ |LoRA rank|Rank of the LoRA (do not have to tune in most cases).|
113
+
114
+
115
+ ## License
116
+ Code related to the DragDiffusion algorithm is under Apache 2.0 license.
117
+
118
+
119
+ ## BibTeX
120
+ ```bibtex
121
+ @article{shi2023dragdiffusion,
122
+ title={DragDiffusion: Harnessing Diffusion Models for Interactive Point-based Image Editing},
123
+ author={Shi, Yujun and Xue, Chuhui and Pan, Jiachun and Zhang, Wenqing and Tan, Vincent YF and Bai, Song},
124
+ journal={arXiv preprint arXiv:2306.14435},
125
+ year={2023}
126
+ }
127
+ ```
128
+
129
+ ## TODO
130
+ - [x] Upload trained LoRAs of our examples
131
+ - [x] Integrate the LoRA training function into the user interface.
132
+ - [ ] Support using more diffusion models
133
+ - [ ] Support using LoRA downloaded online
134
+
135
+ ## Contact
136
+ For any questions on this project, please contact [Yujun](https://yujun-shi.github.io/) (shi.yujun@u.nus.edu)
137
+
138
+ ## Acknowledgement
139
+ This work is inspired by the amazing [DragGAN](https://vcai.mpi-inf.mpg.de/projects/DragGAN/). The LoRA training code is modified from an [example](https://github.com/huggingface/diffusers/blob/v0.17.1/examples/dreambooth/train_dreambooth_lora.py) in diffusers. Image samples are collected from [Unsplash](https://unsplash.com/), [Pexels](https://www.pexels.com/zh-cn/), and [Pixabay](https://pixabay.com/). Finally, a huge shout-out to all the amazing open-source diffusion models and libraries.
140
+
141
+ ## Related Links
142
+ * [Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold](https://vcai.mpi-inf.mpg.de/projects/DragGAN/)
143
+ * [MasaCtrl: Tuning-free Mutual Self-Attention Control for Consistent Image Synthesis and Editing](https://ljzycmd.github.io/projects/MasaCtrl/)
144
+ * [Emergent Correspondence from Image Diffusion](https://diffusionfeatures.github.io/)
145
+ * [DragonDiffusion: Enabling Drag-style Manipulation on Diffusion Models](https://mc-e.github.io/project/DragonDiffusion/)
146
+ * [FreeDrag: Point Tracking is Not You Need for Interactive Point-based Image Editing](https://lin-chen.site/projects/freedrag/)
147
+
148
+
149
+ ## Common Issues and Solutions
150
+ 1) For users struggling to load models from Hugging Face due to internet constraints: first, follow this [link](https://zhuanlan.zhihu.com/p/475260268) and download the model into the directory "local\_pretrained\_models" (see the sketch below); then run "drag\_ui.py" and select the directory of your pretrained model in "Algorithm Parameters -> Base Model Config -> Diffusion Model Path".
151
+
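+ A minimal sketch of the download step above, assuming `huggingface_hub` is available in the environment (the mirror/endpoint setup from the linked article is not reproduced here):
+ 
+ ```python
+ # Sketch: download a diffusion model into local_pretrained_models/ so it appears in the
+ # "Diffusion Model Path" dropdown. If huggingface.co is unreachable, set the HF_ENDPOINT
+ # environment variable to a mirror before running this.
+ from huggingface_hub import snapshot_download
+ 
+ snapshot_download(
+     repo_id="runwayml/stable-diffusion-v1-5",
+     local_dir="local_pretrained_models/stable-diffusion-v1-5",
+ )
+ ```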
152
 
 
__pycache__/drag_pipeline.cpython-38.pyc ADDED
Binary file (10 kB).
 
drag_pipeline.py ADDED
@@ -0,0 +1,493 @@
1
+ # *************************************************************************
2
+ # Copyright (2023) Bytedance Inc.
3
+ #
4
+ # Copyright (2023) DragDiffusion Authors
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ # *************************************************************************
18
+
19
+ import torch
20
+ import numpy as np
21
+
22
+ import torch.nn.functional as F
23
+ from tqdm import tqdm
24
+ from PIL import Image
25
+ from typing import Any, Dict, List, Optional, Tuple, Union
26
+
27
+ from diffusers import StableDiffusionPipeline
+ from diffusers.utils import logging
+
+ logger = logging.get_logger(__name__)  # used by override_forward below for an informational message
28
+
29
+ # override unet forward
30
+ # The only difference from diffusers:
31
+ # return intermediate UNet features of all UpSample blocks
32
+ def override_forward(self):
33
+
34
+ def forward(
35
+ sample: torch.FloatTensor,
36
+ timestep: Union[torch.Tensor, float, int],
37
+ encoder_hidden_states: torch.Tensor,
38
+ class_labels: Optional[torch.Tensor] = None,
39
+ timestep_cond: Optional[torch.Tensor] = None,
40
+ attention_mask: Optional[torch.Tensor] = None,
41
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
42
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
43
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
44
+ return_intermediates: bool = False,
45
+ last_up_block_idx: int = None,
46
+ ):
47
+ # By default samples have to be AT least a multiple of the overall upsampling factor.
48
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
49
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
50
+ # on the fly if necessary.
51
+ default_overall_up_factor = 2**self.num_upsamplers
52
+
53
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
54
+ forward_upsample_size = False
55
+ upsample_size = None
56
+
57
+ if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
58
+ logger.info("Forward upsample size to force interpolation output size.")
59
+ forward_upsample_size = True
60
+
61
+ # prepare attention_mask
62
+ if attention_mask is not None:
63
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
64
+ attention_mask = attention_mask.unsqueeze(1)
65
+
66
+ # 0. center input if necessary
67
+ if self.config.center_input_sample:
68
+ sample = 2 * sample - 1.0
69
+
70
+ # 1. time
71
+ timesteps = timestep
72
+ if not torch.is_tensor(timesteps):
73
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
74
+ # This would be a good case for the `match` statement (Python 3.10+)
75
+ is_mps = sample.device.type == "mps"
76
+ if isinstance(timestep, float):
77
+ dtype = torch.float32 if is_mps else torch.float64
78
+ else:
79
+ dtype = torch.int32 if is_mps else torch.int64
80
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
81
+ elif len(timesteps.shape) == 0:
82
+ timesteps = timesteps[None].to(sample.device)
83
+
84
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
85
+ timesteps = timesteps.expand(sample.shape[0])
86
+
87
+ t_emb = self.time_proj(timesteps)
88
+
89
+ # `Timesteps` does not contain any weights and will always return f32 tensors
90
+ # but time_embedding might actually be running in fp16. so we need to cast here.
91
+ # there might be better ways to encapsulate this.
92
+ t_emb = t_emb.to(dtype=self.dtype)
93
+
94
+ emb = self.time_embedding(t_emb, timestep_cond)
95
+
96
+ if self.class_embedding is not None:
97
+ if class_labels is None:
98
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
99
+
100
+ if self.config.class_embed_type == "timestep":
101
+ class_labels = self.time_proj(class_labels)
102
+
103
+ # `Timesteps` does not contain any weights and will always return f32 tensors
104
+ # there might be better ways to encapsulate this.
105
+ class_labels = class_labels.to(dtype=sample.dtype)
106
+
107
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
108
+
109
+ if self.config.class_embeddings_concat:
110
+ emb = torch.cat([emb, class_emb], dim=-1)
111
+ else:
112
+ emb = emb + class_emb
113
+
114
+ if self.config.addition_embed_type == "text":
115
+ aug_emb = self.add_embedding(encoder_hidden_states)
116
+ emb = emb + aug_emb
117
+
118
+ if self.time_embed_act is not None:
119
+ emb = self.time_embed_act(emb)
120
+
121
+ if self.encoder_hid_proj is not None:
122
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
123
+
124
+ # 2. pre-process
125
+ sample = self.conv_in(sample)
126
+
127
+ # 3. down
128
+ down_block_res_samples = (sample,)
129
+ for downsample_block in self.down_blocks:
130
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
131
+ sample, res_samples = downsample_block(
132
+ hidden_states=sample,
133
+ temb=emb,
134
+ encoder_hidden_states=encoder_hidden_states,
135
+ attention_mask=attention_mask,
136
+ cross_attention_kwargs=cross_attention_kwargs,
137
+ )
138
+ else:
139
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
140
+
141
+ down_block_res_samples += res_samples
142
+
143
+ if down_block_additional_residuals is not None:
144
+ new_down_block_res_samples = ()
145
+
146
+ for down_block_res_sample, down_block_additional_residual in zip(
147
+ down_block_res_samples, down_block_additional_residuals
148
+ ):
149
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
150
+ new_down_block_res_samples += (down_block_res_sample,)
151
+
152
+ down_block_res_samples = new_down_block_res_samples
153
+
154
+ # 4. mid
155
+ if self.mid_block is not None:
156
+ sample = self.mid_block(
157
+ sample,
158
+ emb,
159
+ encoder_hidden_states=encoder_hidden_states,
160
+ attention_mask=attention_mask,
161
+ cross_attention_kwargs=cross_attention_kwargs,
162
+ )
163
+
164
+ if mid_block_additional_residual is not None:
165
+ sample = sample + mid_block_additional_residual
166
+
167
+ # 5. up
168
+ # only difference from diffusers:
169
+ # save the intermediate features of unet upsample blocks
170
+ # the 0-th element is the mid-block output
171
+ all_intermediate_features = [sample]
172
+ for i, upsample_block in enumerate(self.up_blocks):
173
+ is_final_block = i == len(self.up_blocks) - 1
174
+
175
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
176
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
177
+
178
+ # if we have not reached the final block and need to forward the
179
+ # upsample size, we do it here
180
+ if not is_final_block and forward_upsample_size:
181
+ upsample_size = down_block_res_samples[-1].shape[2:]
182
+
183
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
184
+ sample = upsample_block(
185
+ hidden_states=sample,
186
+ temb=emb,
187
+ res_hidden_states_tuple=res_samples,
188
+ encoder_hidden_states=encoder_hidden_states,
189
+ cross_attention_kwargs=cross_attention_kwargs,
190
+ upsample_size=upsample_size,
191
+ attention_mask=attention_mask,
192
+ )
193
+ else:
194
+ sample = upsample_block(
195
+ hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
196
+ )
197
+ all_intermediate_features.append(sample)
198
+ # return early to save computation time if needed
199
+ if last_up_block_idx is not None and i == last_up_block_idx:
200
+ return all_intermediate_features
201
+
202
+ # 6. post-process
203
+ if self.conv_norm_out:
204
+ sample = self.conv_norm_out(sample)
205
+ sample = self.conv_act(sample)
206
+ sample = self.conv_out(sample)
207
+
208
+ # only difference from diffusers, return intermediate results
209
+ if return_intermediates:
210
+ return sample, all_intermediate_features
211
+ else:
212
+ return sample
213
+
214
+ return forward
215
+
216
+
217
+ class DragPipeline(StableDiffusionPipeline):
218
+
219
+ # must call this function when initialize
220
+ def modify_unet_forward(self):
221
+ self.unet.forward = override_forward(self.unet)
222
+
223
+ def inv_step(
224
+ self,
225
+ model_output: torch.FloatTensor,
226
+ timestep: int,
227
+ x: torch.FloatTensor,
228
+ eta=0.,
229
+ verbose=False
230
+ ):
231
+ """
232
+ Inverse sampling for DDIM Inversion
233
+ """
234
+ if verbose:
235
+ print("timestep: ", timestep)
236
+ next_step = timestep
237
+ timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)
238
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
239
+ alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
240
+ beta_prod_t = 1 - alpha_prod_t
241
+ pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
242
+ pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
243
+ x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
244
+ return x_next, pred_x0
245
+
246
+ def step(
247
+ self,
248
+ model_output: torch.FloatTensor,
249
+ timestep: int,
250
+ x: torch.FloatTensor,
251
+ ):
252
+ """
253
+ Predict the sample at the previous timestep, i.e., the next step of the denoising process.
254
+ """
255
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
256
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
257
+ alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod
258
+ beta_prod_t = 1 - alpha_prod_t
259
+ pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
260
+ pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output
261
+ x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir
262
+ return x_prev, pred_x0
263
+
264
+ @torch.no_grad()
265
+ def image2latent(self, image):
266
+ DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
267
+ if isinstance(image, Image.Image):  # convert PIL images; tensors are assumed to already be in [-1, 1]
268
+ image = np.array(image)
269
+ image = torch.from_numpy(image).float() / 127.5 - 1
270
+ image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)
271
+ # input image density range [-1, 1]
272
+ latents = self.vae.encode(image)['latent_dist'].mean
273
+ latents = latents * 0.18215
274
+ return latents
275
+
276
+ @torch.no_grad()
277
+ def latent2image(self, latents, return_type='np'):
278
+ latents = 1 / 0.18215 * latents.detach()
279
+ image = self.vae.decode(latents)['sample']
280
+ if return_type == 'np':
281
+ image = (image / 2 + 0.5).clamp(0, 1)
282
+ image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
283
+ image = (image * 255).astype(np.uint8)
284
+ elif return_type == "pt":
285
+ image = (image / 2 + 0.5).clamp(0, 1)
286
+
287
+ return image
288
+
289
+ def latent2image_grad(self, latents):
290
+ latents = 1 / 0.18215 * latents
291
+ image = self.vae.decode(latents)['sample']
292
+
293
+ return image # range [-1, 1]
294
+
295
+ @torch.no_grad()
296
+ def get_text_embeddings(self, prompt):
297
+ # text embeddings
298
+ text_input = self.tokenizer(
299
+ prompt,
300
+ padding="max_length",
301
+ max_length=77,
302
+ return_tensors="pt"
303
+ )
304
+ text_embeddings = self.text_encoder(text_input.input_ids.cuda())[0]
305
+ return text_embeddings
306
+
307
+ # get all intermediate features and then do bilinear interpolation
308
+ # return features in the layer_idx list
309
+ def forward_unet_features(self, z, t, encoder_hidden_states, layer_idx=[0], interp_res_h=256, interp_res_w=256):
310
+ unet_output, all_intermediate_features = self.unet(
311
+ z,
312
+ t,
313
+ encoder_hidden_states=encoder_hidden_states,
314
+ return_intermediates=True
315
+ )
316
+
317
+ all_return_features = []
318
+ for idx in layer_idx:
319
+ feat = all_intermediate_features[idx]
320
+ feat = F.interpolate(feat, (interp_res_h, interp_res_w), mode='bilinear')
321
+ all_return_features.append(feat)
322
+ return_features = torch.cat(all_return_features, dim=1)
323
+ return unet_output, return_features
324
+
325
+ @torch.no_grad()
326
+ def __call__(
327
+ self,
328
+ prompt,
329
+ prompt_embeds=None, # whether text embedding is directly provided.
330
+ batch_size=1,
331
+ height=512,
332
+ width=512,
333
+ num_inference_steps=50,
334
+ num_actual_inference_steps=None,
335
+ guidance_scale=7.5,
336
+ latents=None,
337
+ unconditioning=None,
338
+ neg_prompt=None,
339
+ return_intermediates=False,
340
+ **kwds):
341
+ DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
342
+
343
+ if prompt_embeds is None:
344
+ if isinstance(prompt, list):
345
+ batch_size = len(prompt)
346
+ elif isinstance(prompt, str):
347
+ if batch_size > 1:
348
+ prompt = [prompt] * batch_size
349
+
350
+ # text embeddings
351
+ text_input = self.tokenizer(
352
+ prompt,
353
+ padding="max_length",
354
+ max_length=77,
355
+ return_tensors="pt"
356
+ )
357
+ text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]
358
+ else:
359
+ batch_size = prompt_embeds.shape[0]
360
+ text_embeddings = prompt_embeds
361
+ print("input text embeddings :", text_embeddings.shape)
362
+
363
+ # define initial latents if not predefined
364
+ if latents is None:
365
+ latents_shape = (batch_size, self.unet.in_channels, height//8, width//8)
366
+ latents = torch.randn(latents_shape, device=DEVICE, dtype=self.vae.dtype)
367
+
368
+ # unconditional embedding for classifier free guidance
369
+ if guidance_scale > 1.:
370
+ if neg_prompt:
371
+ uc_text = neg_prompt
372
+ else:
373
+ uc_text = ""
374
+ unconditional_input = self.tokenizer(
375
+ [uc_text] * batch_size,
376
+ padding="max_length",
377
+ max_length=77,
378
+ return_tensors="pt"
379
+ )
380
+ unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]
381
+ text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)
382
+
383
+ print("latents shape: ", latents.shape)
384
+ # iterative sampling
385
+ self.scheduler.set_timesteps(num_inference_steps)
386
+ # print("Valid timesteps: ", reversed(self.scheduler.timesteps))
387
+ latents_list = [latents]
388
+ for i, t in enumerate(tqdm(self.scheduler.timesteps, desc="DDIM Sampler")):
389
+ if num_actual_inference_steps is not None and i < num_inference_steps - num_actual_inference_steps:
390
+ continue
391
+
392
+ if guidance_scale > 1.:
393
+ model_inputs = torch.cat([latents] * 2)
394
+ else:
395
+ model_inputs = latents
396
+ if unconditioning is not None and isinstance(unconditioning, list):
397
+ _, text_embeddings = text_embeddings.chunk(2)
398
+ text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings])
399
+ # predict the noise
400
+ noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings)
401
+ if guidance_scale > 1.0:
402
+ noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
403
+ noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)
404
+ # compute the previous noise sample x_t -> x_t-1
405
+ # YUJUN: right now, the only difference between step here and step in scheduler
406
+ # is that scheduler version would clamp pred_x0 between [-1,1]
407
+ # don't know if that's gonna have huge impact
408
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
409
+ latents_list.append(latents)
410
+
411
+ image = self.latent2image(latents, return_type="pt")
412
+ if return_intermediates:
413
+ return image, latents_list
414
+ return image
415
+
416
+ @torch.no_grad()
417
+ def invert(
418
+ self,
419
+ image: torch.Tensor,
420
+ prompt,
421
+ num_inference_steps=50,
422
+ num_actual_inference_steps=None,
423
+ guidance_scale=7.5,
424
+ eta=0.0,
425
+ return_intermediates=False,
426
+ **kwds):
427
+ """
428
+ Invert a real image into a noise map with deterministic DDIM inversion.
429
+ """
430
+ DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
431
+ batch_size = image.shape[0]
432
+ if isinstance(prompt, list):
433
+ if batch_size == 1:
434
+ image = image.expand(len(prompt), -1, -1, -1)
435
+ elif isinstance(prompt, str):
436
+ if batch_size > 1:
437
+ prompt = [prompt] * batch_size
438
+
439
+ # text embeddings
440
+ text_input = self.tokenizer(
441
+ prompt,
442
+ padding="max_length",
443
+ max_length=77,
444
+ return_tensors="pt"
445
+ )
446
+ text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]
447
+ print("input text embeddings :", text_embeddings.shape)
448
+ # define initial latents
449
+ latents = self.image2latent(image)
450
+
451
+ # unconditional embedding for classifier free guidance
452
+ if guidance_scale > 1.:
453
+ max_length = text_input.input_ids.shape[-1]
454
+ unconditional_input = self.tokenizer(
455
+ [""] * batch_size,
456
+ padding="max_length",
457
+ max_length=77,
458
+ return_tensors="pt"
459
+ )
460
+ unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]
461
+ text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)
462
+
463
+ print("latents shape: ", latents.shape)
464
+ # iterative sampling
465
+ self.scheduler.set_timesteps(num_inference_steps)
466
+ print("Valid timesteps: ", reversed(self.scheduler.timesteps))
467
+ # print("attributes: ", self.scheduler.__dict__)
468
+ latents_list = [latents]
469
+ pred_x0_list = [latents]
470
+ for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
471
+ if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
472
+ continue
473
+
474
+ if guidance_scale > 1.:
475
+ model_inputs = torch.cat([latents] * 2)
476
+ else:
477
+ model_inputs = latents
478
+
479
+ # predict the noise
480
+ noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings)
481
+ if guidance_scale > 1.:
482
+ noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
483
+ noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)
484
+ # compute the previous noise sample x_t-1 -> x_t
485
+ latents, pred_x0 = self.inv_step(noise_pred, t, latents)
486
+ latents_list.append(latents)
487
+ pred_x0_list.append(pred_x0)
488
+
489
+ if return_intermediates:
490
+ # return the intermediate latents during inversion
491
+ # pred_x0_list = [self.latent2image(img, return_type="pt") for img in pred_x0_list]
492
+ return latents, latents_list
493
+ return latents
drag_ui.py ADDED
@@ -0,0 +1,335 @@
1
+ # *************************************************************************
2
+ # Copyright (2023) Bytedance Inc.
3
+ #
4
+ # Copyright (2023) DragDiffusion Authors
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ # *************************************************************************
18
+
19
+ import os
20
+ import gradio as gr
21
+
22
+ from utils.ui_utils import get_points, undo_points
23
+ from utils.ui_utils import clear_all, store_img, train_lora_interface, run_drag
24
+ from utils.ui_utils import clear_all_gen, store_img_gen, gen_img, run_drag_gen
25
+
26
+ LENGTH=480 # length of the square area displaying/editing images
27
+
28
+ with gr.Blocks() as demo:
29
+ # layout definition
30
+ with gr.Row():
31
+ gr.Markdown("""
32
+ # Official Implementation of [DragDiffusion](https://arxiv.org/abs/2306.14435)
33
+ """)
34
+
35
+ # UI components for editing real images
36
+ with gr.Tab(label="Editing Real Image"):
37
+ mask = gr.State(value=None) # store mask
38
+ selected_points = gr.State([]) # store points
39
+ original_image = gr.State(value=None) # store original input image
40
+ with gr.Row():
41
+ with gr.Column():
42
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Draw Mask</p>""")
43
+ canvas = gr.Image(type="numpy", tool="sketch", label="Draw Mask",
44
+ show_label=True, height=LENGTH, width=LENGTH) # for mask painting
45
+ train_lora_button = gr.Button("Train LoRA")
46
+ with gr.Column():
47
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Click Points</p>""")
48
+ input_image = gr.Image(type="numpy", label="Click Points",
49
+ show_label=True, height=LENGTH, width=LENGTH) # for points clicking
50
+ undo_button = gr.Button("Undo point")
51
+ with gr.Column():
52
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Editing Results</p>""")
53
+ output_image = gr.Image(type="numpy", label="Editing Results",
54
+ show_label=True, height=LENGTH, width=LENGTH)
55
+ with gr.Row():
56
+ run_button = gr.Button("Run")
57
+ clear_all_button = gr.Button("Clear All")
58
+
59
+ # general parameters
60
+ with gr.Row():
61
+ prompt = gr.Textbox(label="Prompt")
62
+ lora_path = gr.Textbox(value="./lora_tmp", label="LoRA path")
63
+ lora_status_bar = gr.Textbox(label="display LoRA training status")
64
+
65
+ # algorithm specific parameters
66
+ with gr.Tab("Drag Config"):
67
+ with gr.Row():
68
+ n_pix_step = gr.Number(
69
+ value=40,
70
+ label="number of pixel steps",
71
+ info="Number of gradient descent (motion supervision) steps on latent.",
72
+ precision=0)
73
+ lam = gr.Number(value=0.1, label="lam", info="regularization strength on unmasked areas")
74
+ # n_actual_inference_step = gr.Number(value=40, label="optimize latent step", precision=0)
75
+ inversion_strength = gr.Slider(0, 1.0,
76
+ value=0.75,
77
+ label="inversion strength",
78
+ info="The latent at [inversion-strength * total-sampling-steps] is optimized for dragging.")
79
+ latent_lr = gr.Number(value=0.01, label="latent lr")
80
+ start_step = gr.Number(value=0, label="start_step", precision=0, visible=False)
81
+ start_layer = gr.Number(value=10, label="start_layer", precision=0, visible=False)
82
+
83
+ with gr.Tab("Base Model Config"):
84
+ with gr.Row():
85
+ local_models_dir = 'local_pretrained_models'
86
+ local_models_choice = \
87
+ [os.path.join(local_models_dir,d) for d in os.listdir(local_models_dir) if os.path.isdir(os.path.join(local_models_dir,d))]
88
+ model_path = gr.Dropdown(value="runwayml/stable-diffusion-v1-5",
89
+ label="Diffusion Model Path",
90
+ choices=[
91
+ "runwayml/stable-diffusion-v1-5",
92
+ ] + local_models_choice
93
+ )
94
+ vae_path = gr.Dropdown(value="default",
95
+ label="VAE choice",
96
+ choices=["default",
97
+ "stabilityai/sd-vae-ft-mse"] + local_models_choice
98
+ )
99
+
100
+ with gr.Tab("LoRA Parameters"):
101
+ with gr.Row():
102
+ lora_step = gr.Number(value=200, label="LoRA training steps", precision=0)
103
+ lora_lr = gr.Number(value=0.0002, label="LoRA learning rate")
104
+ lora_rank = gr.Number(value=16, label="LoRA rank", precision=0)
105
+
106
+ # UI components for editing generated images
107
+ with gr.Tab(label="Editing Generated Image"):
108
+ mask_gen = gr.State(value=None) # store mask
109
+ selected_points_gen = gr.State([]) # store points
110
+ original_image_gen = gr.State(value=None) # store the diffusion-generated image
111
+ intermediate_latents_gen = gr.State(value=None) # store the intermediate diffusion latent during generation
112
+ with gr.Row():
113
+ with gr.Column():
114
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Draw Mask</p>""")
115
+ canvas_gen = gr.Image(type="numpy", tool="sketch", label="Draw Mask",
116
+ show_label=True, height=LENGTH, width=LENGTH) # for mask painting
117
+ gen_img_button = gr.Button("Generate Image")
118
+ with gr.Column():
119
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Click Points</p>""")
120
+ input_image_gen = gr.Image(type="numpy", label="Click Points",
121
+ show_label=True, height=LENGTH, width=LENGTH) # for points clicking
122
+ undo_button_gen = gr.Button("Undo point")
123
+ with gr.Column():
124
+ gr.Markdown("""<p style="text-align: center; font-size: 20px">Editing Results</p>""")
125
+ output_image_gen = gr.Image(type="numpy", label="Editing Results",
126
+ show_label=True, height=LENGTH, width=LENGTH)
127
+ with gr.Row():
128
+ run_button_gen = gr.Button("Run")
129
+ clear_all_button_gen = gr.Button("Clear All")
130
+
131
+ # general parameters
132
+ with gr.Row():
133
+ pos_prompt_gen = gr.Textbox(label="Positive Prompt")
134
+ neg_prompt_gen = gr.Textbox(label="Negative Prompt")
135
+
136
+ with gr.Tab("Generation Config"):
137
+ with gr.Row():
138
+ local_models_dir = 'local_pretrained_models'
139
+ local_models_choice = \
140
+ [os.path.join(local_models_dir,d) for d in os.listdir(local_models_dir) if os.path.isdir(os.path.join(local_models_dir,d))]
141
+ model_path_gen = gr.Dropdown(value="runwayml/stable-diffusion-v1-5",
142
+ label="Diffusion Model Path",
143
+ choices=[
144
+ "runwayml/stable-diffusion-v1-5",
145
+ "gsdf/Counterfeit-V2.5",
146
+ "emilianJR/majicMIX_realistic",
147
+ "SG161222/Realistic_Vision_V2.0",
148
+ "stablediffusionapi/landscapesupermix",
149
+ "huangzhe0803/ArchitectureRealMix",
150
+ "stablediffusionapi/interiordesignsuperm"
151
+ ] + local_models_choice
152
+ )
153
+ vae_path_gen = gr.Dropdown(value="default",
154
+ label="VAE choice",
155
+ choices=["default",
156
+ "stabilityai/sd-vae-ft-mse"] + local_models_choice
157
+ )
158
+ lora_path_gen = gr.Textbox(value="", label="LoRA path")
159
+ gen_seed = gr.Number(value=65536, label="Generation Seed", precision=0)
160
+ height = gr.Number(value=512, label="Height", precision=0)
161
+ width = gr.Number(value=512, label="Width", precision=0)
162
+ guidance_scale = gr.Number(value=7.5, label="CFG Scale")
163
+ scheduler_name_gen = gr.Dropdown(
164
+ value="DDIM",
165
+ label="Scheduler",
166
+ choices=[
167
+ "DDIM",
168
+ "DPM++2M",
169
+ "DPM++2M_karras"
170
+ ]
171
+ )
172
+ n_inference_step_gen = gr.Number(value=50, label="Total Sampling Steps", precision=0)
173
+
174
+ with gr.Tab(label="Drag Config"):
175
+ with gr.Row():
176
+ n_pix_step_gen = gr.Number(
177
+ value=40,
178
+ label="Number of Pixel Steps",
179
+ info="Number of gradient descent (motion supervision) steps on latent.",
180
+ precision=0)
181
+ lam_gen = gr.Number(value=0.1, label="lam", info="regularization strength on unmasked areas")
182
+ # n_actual_inference_step_gen = gr.Number(value=40, label="optimize latent step", precision=0)
183
+ inversion_strength_gen = gr.Slider(0, 1.0,
184
+ value=0.75,
185
+ label="Inversion Strength",
186
+ info="The latent at [inversion-strength * total-sampling-steps] is optimized for dragging.")
187
+ latent_lr_gen = gr.Number(value=0.01, label="latent lr")
188
+ start_step_gen = gr.Number(value=0, label="start_step", precision=0, visible=False)
189
+ start_layer_gen = gr.Number(value=10, label="start_layer", precision=0, visible=False)
190
+ # Add a checkbox for users to select if they want a GIF of the process
191
+ with gr.Row():
192
+ create_gif_checkbox = gr.Checkbox(label="create_GIF", value=False)
193
+ create_tracking_point_checkbox = gr.Checkbox(label="create_tracking_point", value=False)
194
+ gif_interval = gr.Number(value=10, label="interval_GIF", precision=0, info="GIF frame interval, i.e. the number of drag steps between consecutive GIF frames.")
195
+ gif_fps = gr.Number(value=1, label="fps_GIF", precision=0, info="Frame rate (frames per second) of the generated GIF.")
196
+
197
+ # event definition
198
+ # event for dragging user-input real image
199
+ canvas.edit(
200
+ store_img,
201
+ [canvas],
202
+ [original_image, selected_points, input_image, mask]
203
+ )
204
+ input_image.select(
205
+ get_points,
206
+ [input_image, selected_points],
207
+ [input_image],
208
+ )
209
+ undo_button.click(
210
+ undo_points,
211
+ [original_image, mask],
212
+ [input_image, selected_points]
213
+ )
214
+ train_lora_button.click(
215
+ train_lora_interface,
216
+ [original_image,
217
+ prompt,
218
+ model_path,
219
+ vae_path,
220
+ lora_path,
221
+ lora_step,
222
+ lora_lr,
223
+ lora_rank],
224
+ [lora_status_bar]
225
+ )
226
+ run_button.click(
227
+ run_drag,
228
+ [original_image,
229
+ input_image,
230
+ mask,
231
+ prompt,
232
+ selected_points,
233
+ inversion_strength,
234
+ lam,
235
+ latent_lr,
236
+ n_pix_step,
237
+ model_path,
238
+ vae_path,
239
+ lora_path,
240
+ start_step,
241
+ start_layer,
242
+ create_gif_checkbox,
243
+ gif_interval,
244
+ ],
245
+ [output_image]
246
+ )
247
+ clear_all_button.click(
248
+ clear_all,
249
+ [gr.Number(value=LENGTH, visible=False, precision=0)],
250
+ [canvas,
251
+ input_image,
252
+ output_image,
253
+ selected_points,
254
+ original_image,
255
+ mask]
256
+ )
257
+
258
+ # event for dragging generated image
259
+ canvas_gen.edit(
260
+ store_img_gen,
261
+ [canvas_gen],
262
+ [original_image_gen, selected_points_gen, input_image_gen, mask_gen]
263
+ )
264
+ input_image_gen.select(
265
+ get_points,
266
+ [input_image_gen, selected_points_gen],
267
+ [input_image_gen],
268
+ )
269
+ gen_img_button.click(
270
+ gen_img,
271
+ [
272
+ gr.Number(value=LENGTH, visible=False, precision=0),
273
+ height,
274
+ width,
275
+ n_inference_step_gen,
276
+ scheduler_name_gen,
277
+ gen_seed,
278
+ guidance_scale,
279
+ pos_prompt_gen,
280
+ neg_prompt_gen,
281
+ model_path_gen,
282
+ vae_path_gen,
283
+ lora_path_gen,
284
+ ],
285
+ [canvas_gen, input_image_gen, output_image_gen, mask_gen, intermediate_latents_gen]
286
+ )
287
+ undo_button_gen.click(
288
+ undo_points,
289
+ [original_image_gen, mask_gen],
290
+ [input_image_gen, selected_points_gen]
291
+ )
292
+ run_button_gen.click(
293
+ run_drag_gen,
294
+ [
295
+ n_inference_step_gen,
296
+ scheduler_name_gen,
297
+ original_image_gen, # the original image generated by the diffusion model
298
+ input_image_gen, # image with clicking, masking, etc.
299
+ intermediate_latents_gen,
300
+ guidance_scale,
301
+ mask_gen,
302
+ pos_prompt_gen,
303
+ neg_prompt_gen,
304
+ selected_points_gen,
305
+ inversion_strength_gen,
306
+ lam_gen,
307
+ latent_lr_gen,
308
+ n_pix_step_gen,
309
+ model_path_gen,
310
+ vae_path_gen,
311
+ lora_path_gen,
312
+ start_step_gen,
313
+ start_layer_gen,
314
+ create_gif_checkbox,
315
+ create_tracking_point_checkbox,
316
+ gif_interval,
317
+ gif_fps
318
+ ],
319
+ [output_image_gen]
320
+ )
321
+ clear_all_button_gen.click(
322
+ clear_all_gen,
323
+ [gr.Number(value=LENGTH, visible=False, precision=0)],
324
+ [canvas_gen,
325
+ input_image_gen,
326
+ output_image_gen,
327
+ selected_points_gen,
328
+ original_image_gen,
329
+ mask_gen,
330
+ intermediate_latents_gen,
331
+ ]
332
+ )
333
+
334
+
335
+ demo.queue().launch(share=True, debug=True)
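
Note on the Drag Config above: the info strings state that the latent at inversion-strength * total-sampling-steps is optimized, and that a GIF frame is taken every interval_GIF steps. The sketch below only illustrates how those UI values combine; the variable names echo the UI labels (and the commented-out "optimize latent step" field) but are assumptions, not code taken from drag_ui.py.

    # Illustrative sketch (assumed names), using the UI defaults shown above.
    n_inference_step = 50        # "Total Sampling Steps"
    inversion_strength = 0.75    # "Inversion Strength" slider
    n_pix_step = 40              # "Number of Pixel Steps" (motion-supervision iterations)
    gif_interval = 10            # assumed: one GIF frame every `gif_interval` drag steps

    # The latent optimized for dragging sits roughly at inversion_strength * total steps
    # of the sampling trajectory (the exact rounding used internally is not shown here):
    n_actual_inference_step = int(inversion_strength * n_inference_step)   # 37 with these defaults
    # Number of frames a process GIF would contain under this assumption:
    n_gif_frames = n_pix_step // gif_interval                               # 4 frames
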
environment.yaml ADDED
@@ -0,0 +1,48 @@
1
+ name: dragdiff
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ - nvidia
6
+ - conda-forge
7
+ dependencies:
8
+ - python=3.8.5
9
+ - pip=22.3.1
10
+ - cudatoolkit=11.7
11
+ - pip:
12
+ - torch==2.0.0
13
+ - torchvision==0.15.1
14
+ - gradio==3.41.1
15
+ - pydantic==2.0.2
16
+ - albumentations==1.3.0
17
+ - opencv-contrib-python==4.3.0.38
18
+ - imageio==2.9.0
19
+ - imageio-ffmpeg==0.4.2
20
+ - pytorch-lightning==1.5.0
21
+ - omegaconf==2.3.0
22
+ - test-tube>=0.7.5
23
+ - streamlit==1.12.1
24
+ - einops==0.6.0
25
+ - transformers==4.27.0
26
+ - webdataset==0.2.5
27
+ - kornia==0.6
28
+ - open_clip_torch==2.16.0
29
+ - invisible-watermark>=0.1.5
30
+ - streamlit-drawable-canvas==0.8.0
31
+ - torchmetrics==0.6.0
32
+ - timm==0.6.12
33
+ - addict==2.4.0
34
+ - yapf==0.32.0
35
+ - prettytable==3.6.0
36
+ - safetensors==0.2.7
37
+ - basicsr==1.4.2
38
+ - accelerate==0.17.0
39
+ - decord==0.6.0
40
+ - diffusers==0.17.1
41
+ - moviepy==1.0.3
42
+ - opencv_python==4.7.0.68
43
+ - Pillow==9.4.0
44
+ - scikit_image==0.19.3
45
+ - scipy==1.10.1
46
+ - tensorboardX==2.6
47
+ - tqdm==4.64.1
48
+ - numpy==1.24.1
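
The pins above are strict, so it can help to confirm they resolved correctly after creating the environment. A minimal sanity check, assuming the dragdiff environment is active (this snippet is not part of the repository):

    # Quick version check for the key pins in environment.yaml (illustrative only).
    import torch, diffusers, gradio, transformers

    assert torch.__version__.startswith("2.0.0"), torch.__version__        # torch==2.0.0
    assert diffusers.__version__ == "0.17.1", diffusers.__version__        # diffusers==0.17.1
    assert gradio.__version__ == "3.41.1", gradio.__version__              # gradio==3.41.1
    assert transformers.__version__ == "4.27.0", transformers.__version__  # transformers==4.27.0
    print("dragdiff environment looks consistent")
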
local_pretrained_models/dummy.txt ADDED
@@ -0,0 +1 @@
1
+ You may put your pretrained model here.
lora/lora_ckpt/dummy.txt ADDED
@@ -0,0 +1 @@
1
+ LoRA checkpoints will be saved in this folder.
lora/samples/cat_dog/andrew-s-ouo1hbizWwo-unsplash.jpg ADDED
lora/samples/oilpaint1/catherine-kay-greenup-6rhUen8Wrao-unsplash.jpg ADDED
lora/samples/oilpaint2/birmingham-museums-trust-wKlHsooRVbg-unsplash.jpg ADDED
lora/samples/prompts.txt ADDED
@@ -0,0 +1,6 @@
1
+ # prompts we used when editing the given samples:
2
+
3
+ cat_dog: a photo of a cat and a dog
4
+ oilpaint1: an oil painting of a mountain besides a lake
5
+ oilpaint2: an oil painting of a mountain and forest
6
+ sculpture: a photo of a sculpture
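
The prompts above pair one-to-one with the sample folders in lora/samples. A hypothetical mapping (not part of the repository) that could be used when scripting the --instance_data_dir / --instance_prompt arguments of lora/train_dreambooth_lora.py:

    # Hypothetical folder-to-prompt mapping for the samples above (illustrative only).
    SAMPLE_PROMPTS = {
        "lora/samples/cat_dog":   "a photo of a cat and a dog",
        "lora/samples/oilpaint1": "an oil painting of a mountain besides a lake",
        "lora/samples/oilpaint2": "an oil painting of a mountain and forest",
        "lora/samples/sculpture": "a photo of a sculpture",
    }
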
lora/samples/sculpture/evan-lee-EdAVNRvUVH4-unsplash.jpg ADDED
lora/train_dreambooth_lora.py ADDED
@@ -0,0 +1,1324 @@
1
+ # *************************************************************************
2
+ # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
3
+ # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
4
+ # ytedance Inc..
5
+ # *************************************************************************
6
+
7
+ import argparse
8
+ import gc
9
+ import hashlib
10
+ import itertools
11
+ import logging
12
+ import math
13
+ import os
14
+ import warnings
15
+ from pathlib import Path
16
+
17
+ import numpy as np
18
+ import torch
19
+ import torch.nn.functional as F
20
+ import torch.utils.checkpoint
21
+ import transformers
22
+ from accelerate import Accelerator
23
+ from accelerate.logging import get_logger
24
+ from accelerate.utils import ProjectConfiguration, set_seed
25
+ from huggingface_hub import create_repo, upload_folder
26
+ from packaging import version
27
+ from PIL import Image
28
+ from PIL.ImageOps import exif_transpose
29
+ from torch.utils.data import Dataset
30
+ from torchvision import transforms
31
+ from tqdm.auto import tqdm
32
+ from transformers import AutoTokenizer, PretrainedConfig
33
+
34
+ import diffusers
35
+ from diffusers import (
36
+ AutoencoderKL,
37
+ DDPMScheduler,
38
+ DiffusionPipeline,
39
+ DPMSolverMultistepScheduler,
40
+ StableDiffusionPipeline,
41
+ UNet2DConditionModel,
42
+ )
43
+ from diffusers.loaders import AttnProcsLayers, LoraLoaderMixin
44
+ from diffusers.models.attention_processor import (
45
+ AttnAddedKVProcessor,
46
+ AttnAddedKVProcessor2_0,
47
+ LoRAAttnAddedKVProcessor,
48
+ LoRAAttnProcessor,
49
+ LoRAAttnProcessor2_0,
50
+ SlicedAttnAddedKVProcessor,
51
+ )
52
+ from diffusers.optimization import get_scheduler
53
+ from diffusers.utils import TEXT_ENCODER_ATTN_MODULE, check_min_version, is_wandb_available
54
+ from diffusers.utils.import_utils import is_xformers_available
55
+
56
+
57
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
58
+ check_min_version("0.17.0")
59
+
60
+ logger = get_logger(__name__)
61
+
62
+
63
+ def save_model_card(
64
+ repo_id: str,
65
+ images=None,
66
+ base_model=str,
67
+ train_text_encoder=False,
68
+ prompt=str,
69
+ repo_folder=None,
70
+ pipeline: DiffusionPipeline = None,
71
+ ):
72
+ img_str = ""
73
+ for i, image in enumerate(images):
74
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
75
+ img_str += f"![img_{i}](./image_{i}.png)\n"
76
+
77
+ yaml = f"""
78
+ ---
79
+ license: creativeml-openrail-m
80
+ base_model: {base_model}
81
+ instance_prompt: {prompt}
82
+ tags:
83
+ - {'stable-diffusion' if isinstance(pipeline, StableDiffusionPipeline) else 'if'}
84
+ - {'stable-diffusion-diffusers' if isinstance(pipeline, StableDiffusionPipeline) else 'if-diffusers'}
85
+ - text-to-image
86
+ - diffusers
87
+ - lora
88
+ inference: true
89
+ ---
90
+ """
91
+ model_card = f"""
92
+ # LoRA DreamBooth - {repo_id}
93
+
94
+ These are LoRA adaptation weights for {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. \n
95
+ {img_str}
96
+
97
+ LoRA for the text encoder was enabled: {train_text_encoder}.
98
+ """
99
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
100
+ f.write(yaml + model_card)
101
+
102
+
103
+ def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
104
+ text_encoder_config = PretrainedConfig.from_pretrained(
105
+ pretrained_model_name_or_path,
106
+ subfolder="text_encoder",
107
+ revision=revision,
108
+ )
109
+ model_class = text_encoder_config.architectures[0]
110
+
111
+ if model_class == "CLIPTextModel":
112
+ from transformers import CLIPTextModel
113
+
114
+ return CLIPTextModel
115
+ elif model_class == "RobertaSeriesModelWithTransformation":
116
+ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
117
+
118
+ return RobertaSeriesModelWithTransformation
119
+ elif model_class == "T5EncoderModel":
120
+ from transformers import T5EncoderModel
121
+
122
+ return T5EncoderModel
123
+ else:
124
+ raise ValueError(f"{model_class} is not supported.")
125
+
126
+
127
+ def parse_args(input_args=None):
128
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
129
+ parser.add_argument(
130
+ "--pretrained_model_name_or_path",
131
+ type=str,
132
+ default=None,
133
+ required=True,
134
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
135
+ )
136
+ parser.add_argument(
137
+ "--revision",
138
+ type=str,
139
+ default=None,
140
+ required=False,
141
+ help="Revision of pretrained model identifier from huggingface.co/models.",
142
+ )
143
+ parser.add_argument(
144
+ "--tokenizer_name",
145
+ type=str,
146
+ default=None,
147
+ help="Pretrained tokenizer name or path if not the same as model_name",
148
+ )
149
+ parser.add_argument(
150
+ "--instance_data_dir",
151
+ type=str,
152
+ default=None,
153
+ required=True,
154
+ help="A folder containing the training data of instance images.",
155
+ )
156
+ parser.add_argument(
157
+ "--class_data_dir",
158
+ type=str,
159
+ default=None,
160
+ required=False,
161
+ help="A folder containing the training data of class images.",
162
+ )
163
+ parser.add_argument(
164
+ "--instance_prompt",
165
+ type=str,
166
+ default=None,
167
+ required=True,
168
+ help="The prompt with identifier specifying the instance",
169
+ )
170
+ parser.add_argument(
171
+ "--class_prompt",
172
+ type=str,
173
+ default=None,
174
+ help="The prompt to specify images in the same class as provided instance images.",
175
+ )
176
+ parser.add_argument(
177
+ "--validation_prompt",
178
+ type=str,
179
+ default=None,
180
+ help="A prompt that is used during validation to verify that the model is learning.",
181
+ )
182
+ parser.add_argument(
183
+ "--num_validation_images",
184
+ type=int,
185
+ default=4,
186
+ help="Number of images that should be generated during validation with `validation_prompt`.",
187
+ )
188
+ parser.add_argument(
189
+ "--validation_epochs",
190
+ type=int,
191
+ default=50,
192
+ help=(
193
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
194
+ " `args.validation_prompt` a total of `args.num_validation_images` times."
195
+ ),
196
+ )
197
+ parser.add_argument(
198
+ "--with_prior_preservation",
199
+ default=False,
200
+ action="store_true",
201
+ help="Flag to add prior preservation loss.",
202
+ )
203
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
204
+ parser.add_argument(
205
+ "--num_class_images",
206
+ type=int,
207
+ default=100,
208
+ help=(
209
+ "Minimal class images for prior preservation loss. If there are not enough images already present in"
210
+ " class_data_dir, additional images will be sampled with class_prompt."
211
+ ),
212
+ )
213
+ parser.add_argument(
214
+ "--output_dir",
215
+ type=str,
216
+ default="lora-dreambooth-model",
217
+ help="The output directory where the model predictions and checkpoints will be written.",
218
+ )
219
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
220
+ parser.add_argument(
221
+ "--resolution",
222
+ type=int,
223
+ default=512,
224
+ help=(
225
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
226
+ " resolution"
227
+ ),
228
+ )
229
+ parser.add_argument(
230
+ "--center_crop",
231
+ default=False,
232
+ action="store_true",
233
+ help=(
234
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
235
+ " cropped. The images will be resized to the resolution first before cropping."
236
+ ),
237
+ )
238
+ parser.add_argument(
239
+ "--train_text_encoder",
240
+ action="store_true",
241
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
242
+ )
243
+ parser.add_argument(
244
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
245
+ )
246
+ parser.add_argument(
247
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
248
+ )
249
+ parser.add_argument("--num_train_epochs", type=int, default=1)
250
+ parser.add_argument(
251
+ "--max_train_steps",
252
+ type=int,
253
+ default=None,
254
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
255
+ )
256
+ parser.add_argument(
257
+ "--checkpointing_steps",
258
+ type=int,
259
+ default=500,
260
+ help=(
261
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
262
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
263
+ " training using `--resume_from_checkpoint`."
264
+ ),
265
+ )
266
+ parser.add_argument(
267
+ "--checkpoints_total_limit",
268
+ type=int,
269
+ default=None,
270
+ help=(
271
+ "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
272
+ " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
273
+ " for more docs"
274
+ ),
275
+ )
276
+ parser.add_argument(
277
+ "--resume_from_checkpoint",
278
+ type=str,
279
+ default=None,
280
+ help=(
281
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
282
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
283
+ ),
284
+ )
285
+ parser.add_argument(
286
+ "--gradient_accumulation_steps",
287
+ type=int,
288
+ default=1,
289
+ help="Number of update steps to accumulate before performing a backward/update pass.",
290
+ )
291
+ parser.add_argument(
292
+ "--gradient_checkpointing",
293
+ action="store_true",
294
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
295
+ )
296
+ parser.add_argument(
297
+ "--learning_rate",
298
+ type=float,
299
+ default=5e-4,
300
+ help="Initial learning rate (after the potential warmup period) to use.",
301
+ )
302
+ parser.add_argument(
303
+ "--scale_lr",
304
+ action="store_true",
305
+ default=False,
306
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
307
+ )
308
+ parser.add_argument(
309
+ "--lr_scheduler",
310
+ type=str,
311
+ default="constant",
312
+ help=(
313
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
314
+ ' "constant", "constant_with_warmup"]'
315
+ ),
316
+ )
317
+ parser.add_argument(
318
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
319
+ )
320
+ parser.add_argument(
321
+ "--lr_num_cycles",
322
+ type=int,
323
+ default=1,
324
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
325
+ )
326
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
327
+ parser.add_argument(
328
+ "--dataloader_num_workers",
329
+ type=int,
330
+ default=0,
331
+ help=(
332
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
333
+ ),
334
+ )
335
+ parser.add_argument(
336
+ "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
337
+ )
338
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
339
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
340
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
341
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
342
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
343
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
344
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
345
+ parser.add_argument(
346
+ "--hub_model_id",
347
+ type=str,
348
+ default=None,
349
+ help="The name of the repository to keep in sync with the local `output_dir`.",
350
+ )
351
+ parser.add_argument(
352
+ "--logging_dir",
353
+ type=str,
354
+ default="logs",
355
+ help=(
356
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
357
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
358
+ ),
359
+ )
360
+ parser.add_argument(
361
+ "--allow_tf32",
362
+ action="store_true",
363
+ help=(
364
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
365
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
366
+ ),
367
+ )
368
+ parser.add_argument(
369
+ "--report_to",
370
+ type=str,
371
+ default="tensorboard",
372
+ help=(
373
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
374
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
375
+ ),
376
+ )
377
+ parser.add_argument(
378
+ "--mixed_precision",
379
+ type=str,
380
+ default=None,
381
+ choices=["no", "fp16", "bf16"],
382
+ help=(
383
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
384
+ " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
385
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
386
+ ),
387
+ )
388
+ parser.add_argument(
389
+ "--prior_generation_precision",
390
+ type=str,
391
+ default=None,
392
+ choices=["no", "fp32", "fp16", "bf16"],
393
+ help=(
394
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
395
+ " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
396
+ ),
397
+ )
398
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
399
+ parser.add_argument(
400
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
401
+ )
402
+ parser.add_argument(
403
+ "--pre_compute_text_embeddings",
404
+ action="store_true",
405
+ help="Whether or not to pre-compute text embeddings. If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.",
406
+ )
407
+ parser.add_argument(
408
+ "--tokenizer_max_length",
409
+ type=int,
410
+ default=None,
411
+ required=False,
412
+ help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.",
413
+ )
414
+ parser.add_argument(
415
+ "--text_encoder_use_attention_mask",
416
+ action="store_true",
417
+ required=False,
418
+ help="Whether to use attention mask for the text encoder",
419
+ )
420
+ parser.add_argument(
421
+ "--validation_images",
422
+ required=False,
423
+ default=None,
424
+ nargs="+",
425
+ help="Optional set of images to use for validation. Used when the target pipeline takes an initial image as input such as when training image variation or superresolution.",
426
+ )
427
+ parser.add_argument(
428
+ "--class_labels_conditioning",
429
+ required=False,
430
+ default=None,
431
+ help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.",
432
+ )
433
+ parser.add_argument(
434
+ "--lora_rank",
435
+ type=int,
436
+ default=4,
437
+ help="Rank of the LoRA update matrices."
438
+ )
439
+
440
+
441
+ if input_args is not None:
442
+ args = parser.parse_args(input_args)
443
+ else:
444
+ args = parser.parse_args()
445
+
446
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
447
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
448
+ args.local_rank = env_local_rank
449
+
450
+ if args.with_prior_preservation:
451
+ if args.class_data_dir is None:
452
+ raise ValueError("You must specify a data directory for class images.")
453
+ if args.class_prompt is None:
454
+ raise ValueError("You must specify prompt for class images.")
455
+ else:
456
+ # logger is not available yet
457
+ if args.class_data_dir is not None:
458
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
459
+ if args.class_prompt is not None:
460
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
461
+
462
+ if args.train_text_encoder and args.pre_compute_text_embeddings:
463
+ raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`")
464
+
465
+ return args
466
+
467
+
468
+ class DreamBoothDataset(Dataset):
469
+ """
470
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
471
+ It pre-processes the images and tokenizes the prompts.
472
+ """
473
+
474
+ def __init__(
475
+ self,
476
+ instance_data_root,
477
+ instance_prompt,
478
+ tokenizer,
479
+ class_data_root=None,
480
+ class_prompt=None,
481
+ class_num=None,
482
+ size=512,
483
+ center_crop=False,
484
+ encoder_hidden_states=None,
485
+ instance_prompt_encoder_hidden_states=None,
486
+ tokenizer_max_length=None,
487
+ ):
488
+ self.size = size
489
+ self.center_crop = center_crop
490
+ self.tokenizer = tokenizer
491
+ self.encoder_hidden_states = encoder_hidden_states
492
+ self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
493
+ self.tokenizer_max_length = tokenizer_max_length
494
+
495
+ self.instance_data_root = Path(instance_data_root)
496
+ if not self.instance_data_root.exists():
497
+ raise ValueError("Instance images root doesn't exist.")
498
+
499
+ self.instance_images_path = list(Path(instance_data_root).iterdir())
500
+ self.num_instance_images = len(self.instance_images_path)
501
+ self.instance_prompt = instance_prompt
502
+ self._length = self.num_instance_images
503
+
504
+ if class_data_root is not None:
505
+ self.class_data_root = Path(class_data_root)
506
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
507
+ self.class_images_path = list(self.class_data_root.iterdir())
508
+ if class_num is not None:
509
+ self.num_class_images = min(len(self.class_images_path), class_num)
510
+ else:
511
+ self.num_class_images = len(self.class_images_path)
512
+ self._length = max(self.num_class_images, self.num_instance_images)
513
+ self.class_prompt = class_prompt
514
+ else:
515
+ self.class_data_root = None
516
+
517
+ self.image_transforms = transforms.Compose(
518
+ [
519
+ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
520
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
521
+ transforms.ToTensor(),
522
+ transforms.Normalize([0.5], [0.5]),
523
+ ]
524
+ )
525
+
526
+ def __len__(self):
527
+ return self._length
528
+
529
+ def __getitem__(self, index):
530
+ example = {}
531
+ instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
532
+ instance_image = exif_transpose(instance_image)
533
+
534
+ if not instance_image.mode == "RGB":
535
+ instance_image = instance_image.convert("RGB")
536
+ example["instance_images"] = self.image_transforms(instance_image)
537
+
538
+ if self.encoder_hidden_states is not None:
539
+ example["instance_prompt_ids"] = self.encoder_hidden_states
540
+ else:
541
+ text_inputs = tokenize_prompt(
542
+ self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length
543
+ )
544
+ example["instance_prompt_ids"] = text_inputs.input_ids
545
+ example["instance_attention_mask"] = text_inputs.attention_mask
546
+
547
+ if self.class_data_root:
548
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
549
+ class_image = exif_transpose(class_image)
550
+
551
+ if not class_image.mode == "RGB":
552
+ class_image = class_image.convert("RGB")
553
+ example["class_images"] = self.image_transforms(class_image)
554
+
555
+ if self.instance_prompt_encoder_hidden_states is not None:
556
+ example["class_prompt_ids"] = self.instance_prompt_encoder_hidden_states
557
+ else:
558
+ class_text_inputs = tokenize_prompt(
559
+ self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length
560
+ )
561
+ example["class_prompt_ids"] = class_text_inputs.input_ids
562
+ example["class_attention_mask"] = class_text_inputs.attention_mask
563
+
564
+ return example
565
+
566
+
567
+ def collate_fn(examples, with_prior_preservation=False):
568
+ has_attention_mask = "instance_attention_mask" in examples[0]
569
+
570
+ input_ids = [example["instance_prompt_ids"] for example in examples]
571
+ pixel_values = [example["instance_images"] for example in examples]
572
+
573
+ if has_attention_mask:
574
+ attention_mask = [example["instance_attention_mask"] for example in examples]
575
+
576
+ # Concat class and instance examples for prior preservation.
577
+ # We do this to avoid doing two forward passes.
578
+ if with_prior_preservation:
579
+ input_ids += [example["class_prompt_ids"] for example in examples]
580
+ pixel_values += [example["class_images"] for example in examples]
581
+ if has_attention_mask:
582
+ attention_mask += [example["class_attention_mask"] for example in examples]
583
+
584
+ pixel_values = torch.stack(pixel_values)
585
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
586
+
587
+ input_ids = torch.cat(input_ids, dim=0)
588
+
589
+ batch = {
590
+ "input_ids": input_ids,
591
+ "pixel_values": pixel_values,
592
+ }
593
+
594
+ if has_attention_mask:
595
+ batch["attention_mask"] = attention_mask
596
+
597
+ return batch
598
+
599
+
600
+ class PromptDataset(Dataset):
601
+ "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
602
+
603
+ def __init__(self, prompt, num_samples):
604
+ self.prompt = prompt
605
+ self.num_samples = num_samples
606
+
607
+ def __len__(self):
608
+ return self.num_samples
609
+
610
+ def __getitem__(self, index):
611
+ example = {}
612
+ example["prompt"] = self.prompt
613
+ example["index"] = index
614
+ return example
615
+
616
+
617
+ def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
618
+ if tokenizer_max_length is not None:
619
+ max_length = tokenizer_max_length
620
+ else:
621
+ max_length = tokenizer.model_max_length
622
+
623
+ text_inputs = tokenizer(
624
+ prompt,
625
+ truncation=True,
626
+ padding="max_length",
627
+ max_length=max_length,
628
+ return_tensors="pt",
629
+ )
630
+
631
+ return text_inputs
632
+
633
+
634
+ def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
635
+ text_input_ids = input_ids.to(text_encoder.device)
636
+
637
+ if text_encoder_use_attention_mask:
638
+ attention_mask = attention_mask.to(text_encoder.device)
639
+ else:
640
+ attention_mask = None
641
+
642
+ prompt_embeds = text_encoder(
643
+ text_input_ids,
644
+ attention_mask=attention_mask,
645
+ )
646
+ prompt_embeds = prompt_embeds[0]
647
+
648
+ return prompt_embeds
649
+
650
+
651
+ def main(args):
652
+ logging_dir = Path(args.output_dir, args.logging_dir)
653
+
654
+ accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
655
+
656
+ accelerator = Accelerator(
657
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
658
+ mixed_precision=args.mixed_precision,
659
+ log_with=args.report_to,
660
+ logging_dir=logging_dir,
661
+ project_config=accelerator_project_config,
662
+ )
663
+
664
+ if args.report_to == "wandb":
665
+ if not is_wandb_available():
666
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
667
+ import wandb
668
+
669
+ # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
670
+ # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
671
+ # TODO (sayakpaul): Remove this check when gradient accumulation with two models is enabled in accelerate.
672
+ if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
673
+ raise ValueError(
674
+ "Gradient accumulation is not supported when training the text encoder in distributed training. "
675
+ "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
676
+ )
677
+
678
+ # Make one log on every process with the configuration for debugging.
679
+ logging.basicConfig(
680
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
681
+ datefmt="%m/%d/%Y %H:%M:%S",
682
+ level=logging.INFO,
683
+ )
684
+ logger.info(accelerator.state, main_process_only=False)
685
+ if accelerator.is_local_main_process:
686
+ transformers.utils.logging.set_verbosity_warning()
687
+ diffusers.utils.logging.set_verbosity_info()
688
+ else:
689
+ transformers.utils.logging.set_verbosity_error()
690
+ diffusers.utils.logging.set_verbosity_error()
691
+
692
+ # If passed along, set the training seed now.
693
+ if args.seed is not None:
694
+ set_seed(args.seed)
695
+
696
+ # Generate class images if prior preservation is enabled.
697
+ if args.with_prior_preservation:
698
+ class_images_dir = Path(args.class_data_dir)
699
+ if not class_images_dir.exists():
700
+ class_images_dir.mkdir(parents=True)
701
+ cur_class_images = len(list(class_images_dir.iterdir()))
702
+
703
+ if cur_class_images < args.num_class_images:
704
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
705
+ if args.prior_generation_precision == "fp32":
706
+ torch_dtype = torch.float32
707
+ elif args.prior_generation_precision == "fp16":
708
+ torch_dtype = torch.float16
709
+ elif args.prior_generation_precision == "bf16":
710
+ torch_dtype = torch.bfloat16
711
+ pipeline = DiffusionPipeline.from_pretrained(
712
+ args.pretrained_model_name_or_path,
713
+ torch_dtype=torch_dtype,
714
+ safety_checker=None,
715
+ revision=args.revision,
716
+ )
717
+ pipeline.set_progress_bar_config(disable=True)
718
+
719
+ num_new_images = args.num_class_images - cur_class_images
720
+ logger.info(f"Number of class images to sample: {num_new_images}.")
721
+
722
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
723
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
724
+
725
+ sample_dataloader = accelerator.prepare(sample_dataloader)
726
+ pipeline.to(accelerator.device)
727
+
728
+ for example in tqdm(
729
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
730
+ ):
731
+ images = pipeline(example["prompt"]).images
732
+
733
+ for i, image in enumerate(images):
734
+ hash_image = hashlib.sha1(image.tobytes()).hexdigest()
735
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
736
+ image.save(image_filename)
737
+
738
+ del pipeline
739
+ if torch.cuda.is_available():
740
+ torch.cuda.empty_cache()
741
+
742
+ # Handle the repository creation
743
+ if accelerator.is_main_process:
744
+ if args.output_dir is not None:
745
+ os.makedirs(args.output_dir, exist_ok=True)
746
+
747
+ if args.push_to_hub:
748
+ repo_id = create_repo(
749
+ repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
750
+ ).repo_id
751
+
752
+ # Load the tokenizer
753
+ if args.tokenizer_name:
754
+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
755
+ elif args.pretrained_model_name_or_path:
756
+ tokenizer = AutoTokenizer.from_pretrained(
757
+ args.pretrained_model_name_or_path,
758
+ subfolder="tokenizer",
759
+ revision=args.revision,
760
+ use_fast=False,
761
+ )
762
+
763
+ # import correct text encoder class
764
+ text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
765
+
766
+ # Load scheduler and models
767
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
768
+ text_encoder = text_encoder_cls.from_pretrained(
769
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
770
+ )
771
+ try:
772
+ vae = AutoencoderKL.from_pretrained(
773
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
774
+ )
775
+ except OSError:
776
+ # IF does not have a VAE so let's just set it to None
777
+ # We don't have to error out here
778
+ vae = None
779
+
780
+ unet = UNet2DConditionModel.from_pretrained(
781
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
782
+ )
783
+
784
+ # We only train the additional adapter LoRA layers
785
+ if vae is not None:
786
+ vae.requires_grad_(False)
787
+ text_encoder.requires_grad_(False)
788
+ unet.requires_grad_(False)
789
+
790
+ # For mixed precision training we cast the text_encoder and vae weights to half-precision
791
+ # as these models are only used for inference, keeping weights in full precision is not required.
792
+ weight_dtype = torch.float32
793
+ if accelerator.mixed_precision == "fp16":
794
+ weight_dtype = torch.float16
795
+ elif accelerator.mixed_precision == "bf16":
796
+ weight_dtype = torch.bfloat16
797
+
798
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
799
+ unet.to(accelerator.device, dtype=weight_dtype)
800
+ if vae is not None:
801
+ vae.to(accelerator.device, dtype=weight_dtype)
802
+ text_encoder.to(accelerator.device, dtype=weight_dtype)
803
+
804
+ if args.enable_xformers_memory_efficient_attention:
805
+ if is_xformers_available():
806
+ import xformers
807
+
808
+ xformers_version = version.parse(xformers.__version__)
809
+ if xformers_version == version.parse("0.0.16"):
810
+ logger.warn(
811
+ "xFormers 0.0.16 cannot be used for training on some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
812
+ )
813
+ unet.enable_xformers_memory_efficient_attention()
814
+ else:
815
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
816
+
817
+ # now we will add new LoRA weights to the attention layers
818
+ # It's important to realize here how many attention weights will be added and of which sizes
819
+ # The sizes of the attention layers consist only of two different variables:
820
+ # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
821
+ # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
822
+
823
+ # Let's first see how many attention processors we will have to set.
824
+ # For Stable Diffusion, it should be equal to:
825
+ # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
826
+ # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
827
+ # - up blocks (2x attention layers) * (3x transformer layers) * (3x up blocks) = 18
828
+ # => 32 layers
829
+
830
+ # Set correct lora layers
831
+ unet_lora_attn_procs = {}
832
+ for name, attn_processor in unet.attn_processors.items():
833
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
834
+ if name.startswith("mid_block"):
835
+ hidden_size = unet.config.block_out_channels[-1]
836
+ elif name.startswith("up_blocks"):
837
+ block_id = int(name[len("up_blocks.")])
838
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
839
+ elif name.startswith("down_blocks"):
840
+ block_id = int(name[len("down_blocks.")])
841
+ hidden_size = unet.config.block_out_channels[block_id]
842
+
843
+ if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
844
+ lora_attn_processor_class = LoRAAttnAddedKVProcessor
845
+ else:
846
+ lora_attn_processor_class = (
847
+ LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
848
+ )
849
+ unet_lora_attn_procs[name] = lora_attn_processor_class(
850
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=args.lora_rank
851
+ )
852
+
853
+ unet.set_attn_processor(unet_lora_attn_procs)
854
+ unet_lora_layers = AttnProcsLayers(unet.attn_processors)
855
+
856
+ # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
857
+ # So, instead, we monkey-patch the forward calls of its attention-blocks. For this,
858
+ # we first load a dummy pipeline with the text encoder and then do the monkey-patching.
859
+ text_encoder_lora_layers = None
860
+ if args.train_text_encoder:
861
+ text_lora_attn_procs = {}
862
+ for name, module in text_encoder.named_modules():
863
+ if name.endswith(TEXT_ENCODER_ATTN_MODULE):
864
+ text_lora_attn_procs[name] = LoRAAttnProcessor(
865
+ hidden_size=module.out_proj.out_features, cross_attention_dim=None
866
+ )
867
+ text_encoder_lora_layers = AttnProcsLayers(text_lora_attn_procs)
868
+ temp_pipeline = DiffusionPipeline.from_pretrained(
869
+ args.pretrained_model_name_or_path, text_encoder=text_encoder
870
+ )
871
+ temp_pipeline._modify_text_encoder(text_lora_attn_procs)
872
+ text_encoder = temp_pipeline.text_encoder
873
+ del temp_pipeline
874
+
875
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
876
+ def save_model_hook(models, weights, output_dir):
877
+ # there are only two options here: either just the unet attn processor layers,
878
+ # or both the unet and text encoder attention layers
879
+ unet_lora_layers_to_save = None
880
+ text_encoder_lora_layers_to_save = None
881
+
882
+ if args.train_text_encoder:
883
+ text_encoder_keys = accelerator.unwrap_model(text_encoder_lora_layers).state_dict().keys()
884
+ unet_keys = accelerator.unwrap_model(unet_lora_layers).state_dict().keys()
885
+
886
+ for model in models:
887
+ state_dict = model.state_dict()
888
+
889
+ if (
890
+ text_encoder_lora_layers is not None
891
+ and text_encoder_keys is not None
892
+ and state_dict.keys() == text_encoder_keys
893
+ ):
894
+ # text encoder
895
+ text_encoder_lora_layers_to_save = state_dict
896
+ elif state_dict.keys() == unet_keys:
897
+ # unet
898
+ unet_lora_layers_to_save = state_dict
899
+
900
+ # make sure to pop the weight so that the corresponding model is not saved again
901
+ weights.pop()
902
+
903
+ LoraLoaderMixin.save_lora_weights(
904
+ output_dir,
905
+ unet_lora_layers=unet_lora_layers_to_save,
906
+ text_encoder_lora_layers=text_encoder_lora_layers_to_save,
907
+ )
908
+
909
+ def load_model_hook(models, input_dir):
910
+ # Note we DON'T pass the unet and text encoder here on purpose,
911
+ # so that we don't accidentally override the LoRA layers of
912
+ # unet_lora_layers and text_encoder_lora_layers which are stored in `models`
913
+ # with new torch.nn.Modules / weights. We simply use the pipeline class as
914
+ # an easy way to load the lora checkpoints
915
+ temp_pipeline = DiffusionPipeline.from_pretrained(
916
+ args.pretrained_model_name_or_path,
917
+ revision=args.revision,
918
+ torch_dtype=weight_dtype,
919
+ )
920
+ temp_pipeline.load_lora_weights(input_dir)
921
+
922
+ # load lora weights into models
923
+ models[0].load_state_dict(AttnProcsLayers(temp_pipeline.unet.attn_processors).state_dict())
924
+ if len(models) > 1:
925
+ models[1].load_state_dict(AttnProcsLayers(temp_pipeline.text_encoder_lora_attn_procs).state_dict())
926
+
927
+ # delete temporary pipeline and pop models
928
+ del temp_pipeline
929
+ for _ in range(len(models)):
930
+ models.pop()
931
+
932
+ accelerator.register_save_state_pre_hook(save_model_hook)
933
+ accelerator.register_load_state_pre_hook(load_model_hook)
934
+
935
+ # Enable TF32 for faster training on Ampere GPUs,
936
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
937
+ if args.allow_tf32:
938
+ torch.backends.cuda.matmul.allow_tf32 = True
939
+
940
+ if args.scale_lr:
941
+ args.learning_rate = (
942
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
943
+ )
944
+
945
+ # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
946
+ if args.use_8bit_adam:
947
+ try:
948
+ import bitsandbytes as bnb
949
+ except ImportError:
950
+ raise ImportError(
951
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
952
+ )
953
+
954
+ optimizer_class = bnb.optim.AdamW8bit
955
+ else:
956
+ optimizer_class = torch.optim.AdamW
957
+
958
+ # Optimizer creation
959
+ params_to_optimize = (
960
+ itertools.chain(unet_lora_layers.parameters(), text_encoder_lora_layers.parameters())
961
+ if args.train_text_encoder
962
+ else unet_lora_layers.parameters()
963
+ )
964
+ optimizer = optimizer_class(
965
+ params_to_optimize,
966
+ lr=args.learning_rate,
967
+ betas=(args.adam_beta1, args.adam_beta2),
968
+ weight_decay=args.adam_weight_decay,
969
+ eps=args.adam_epsilon,
970
+ )
971
+
972
+ if args.pre_compute_text_embeddings:
973
+
974
+ def compute_text_embeddings(prompt):
975
+ with torch.no_grad():
976
+ text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
977
+ prompt_embeds = encode_prompt(
978
+ text_encoder,
979
+ text_inputs.input_ids,
980
+ text_inputs.attention_mask,
981
+ text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
982
+ )
983
+
984
+ return prompt_embeds
985
+
986
+ pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
987
+ validation_prompt_negative_prompt_embeds = compute_text_embeddings("")
988
+
989
+ if args.validation_prompt is not None:
990
+ validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
991
+ else:
992
+ validation_prompt_encoder_hidden_states = None
993
+
994
+ if args.instance_prompt is not None:
995
+ pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
996
+ else:
997
+ pre_computed_instance_prompt_encoder_hidden_states = None
998
+
999
+ text_encoder = None
1000
+ tokenizer = None
1001
+
1002
+ gc.collect()
1003
+ torch.cuda.empty_cache()
1004
+ else:
1005
+ pre_computed_encoder_hidden_states = None
1006
+ validation_prompt_encoder_hidden_states = None
1007
+ validation_prompt_negative_prompt_embeds = None
1008
+ pre_computed_instance_prompt_encoder_hidden_states = None
1009
+
1010
+ # Dataset and DataLoaders creation:
1011
+ train_dataset = DreamBoothDataset(
1012
+ instance_data_root=args.instance_data_dir,
1013
+ instance_prompt=args.instance_prompt,
1014
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1015
+ class_prompt=args.class_prompt,
1016
+ class_num=args.num_class_images,
1017
+ tokenizer=tokenizer,
1018
+ size=args.resolution,
1019
+ center_crop=args.center_crop,
1020
+ encoder_hidden_states=pre_computed_encoder_hidden_states,
1021
+ instance_prompt_encoder_hidden_states=pre_computed_instance_prompt_encoder_hidden_states,
1022
+ tokenizer_max_length=args.tokenizer_max_length,
1023
+ )
1024
+
1025
+ train_dataloader = torch.utils.data.DataLoader(
1026
+ train_dataset,
1027
+ batch_size=args.train_batch_size,
1028
+ shuffle=True,
1029
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1030
+ num_workers=args.dataloader_num_workers,
1031
+ )
1032
+
1033
+ # Scheduler and math around the number of training steps.
1034
+ overrode_max_train_steps = False
1035
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1036
+ if args.max_train_steps is None:
1037
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1038
+ overrode_max_train_steps = True
1039
+
1040
+ lr_scheduler = get_scheduler(
1041
+ args.lr_scheduler,
1042
+ optimizer=optimizer,
1043
+ num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
1044
+ num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
1045
+ num_cycles=args.lr_num_cycles,
1046
+ power=args.lr_power,
1047
+ )
1048
+
1049
+ # Prepare everything with our `accelerator`.
1050
+ if args.train_text_encoder:
1051
+ unet_lora_layers, text_encoder_lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1052
+ unet_lora_layers, text_encoder_lora_layers, optimizer, train_dataloader, lr_scheduler
1053
+ )
1054
+ else:
1055
+ unet_lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1056
+ unet_lora_layers, optimizer, train_dataloader, lr_scheduler
1057
+ )
1058
+
1059
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1060
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1061
+ if overrode_max_train_steps:
1062
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1063
+ # Afterwards we recalculate our number of training epochs
1064
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1065
+
1066
+ # We need to initialize the trackers we use, and also store our configuration.
1067
+ # The trackers initialize automatically on the main process.
1068
+ if accelerator.is_main_process:
1069
+ accelerator.init_trackers("dreambooth-lora", config=vars(args))
1070
+
1071
+ # Train!
1072
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1073
+
1074
+ logger.info("***** Running training *****")
1075
+ logger.info(f" Num examples = {len(train_dataset)}")
1076
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1077
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1078
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1079
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1080
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1081
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1082
+ global_step = 0
1083
+ first_epoch = 0
1084
+
1085
+ # Potentially load in the weights and states from a previous save
1086
+ if args.resume_from_checkpoint:
1087
+ if args.resume_from_checkpoint != "latest":
1088
+ path = os.path.basename(args.resume_from_checkpoint)
1089
+ else:
1090
+ # Get the most recent checkpoint
1091
+ dirs = os.listdir(args.output_dir)
1092
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1093
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1094
+ path = dirs[-1] if len(dirs) > 0 else None
1095
+
1096
+ if path is None:
1097
+ accelerator.print(
1098
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1099
+ )
1100
+ args.resume_from_checkpoint = None
1101
+ else:
1102
+ accelerator.print(f"Resuming from checkpoint {path}")
1103
+ accelerator.load_state(os.path.join(args.output_dir, path))
1104
+ global_step = int(path.split("-")[1])
1105
+
1106
+ resume_global_step = global_step * args.gradient_accumulation_steps
1107
+ first_epoch = global_step // num_update_steps_per_epoch
1108
+ resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
1109
+
1110
+ # Only show the progress bar once on each machine.
1111
+ progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
1112
+ progress_bar.set_description("Steps")
1113
+
1114
+ for epoch in range(first_epoch, args.num_train_epochs):
1115
+ unet.train()
1116
+ if args.train_text_encoder:
1117
+ text_encoder.train()
1118
+ for step, batch in enumerate(train_dataloader):
1119
+ # Skip steps until we reach the resumed step
1120
+ if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
1121
+ if step % args.gradient_accumulation_steps == 0:
1122
+ progress_bar.update(1)
1123
+ continue
1124
+
1125
+ with accelerator.accumulate(unet):
1126
+ pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
1127
+ if vae is not None:
1128
+ # Convert images to latent space
1129
+ model_input = vae.encode(pixel_values).latent_dist
1130
+ model_input = model_input.sample() * vae.config.scaling_factor
1131
+ else:
1132
+ model_input = pixel_values
1133
+
1134
+ # Sample noise that we'll add to the latents
1135
+ noise = torch.randn_like(model_input)
1136
+ bsz, channels, height, width = model_input.shape
1137
+ # Sample a random timestep for each image
1138
+ timesteps = torch.randint(
1139
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1140
+ )
1141
+ timesteps = timesteps.long()
1142
+
1143
+ # Add noise to the model input according to the noise magnitude at each timestep
1144
+ # (this is the forward diffusion process)
1145
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1146
+
1147
+ # Get the text embedding for conditioning
1148
+ if args.pre_compute_text_embeddings:
1149
+ encoder_hidden_states = batch["input_ids"]
1150
+ else:
1151
+ encoder_hidden_states = encode_prompt(
1152
+ text_encoder,
1153
+ batch["input_ids"],
1154
+ batch["attention_mask"],
1155
+ text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
1156
+ )
1157
+
1158
+ if accelerator.unwrap_model(unet).config.in_channels == channels * 2:
1159
+ noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
1160
+
1161
+ if args.class_labels_conditioning == "timesteps":
1162
+ class_labels = timesteps
1163
+ else:
1164
+ class_labels = None
1165
+
1166
+ # Predict the noise residual
1167
+ model_pred = unet(
1168
+ noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels
1169
+ ).sample
1170
+
1171
+ # If the model predicts variance, throw away the prediction. We will only train on the
1172
+ # simplified training objective. This means that all schedulers using the fine-tuned
1173
+ # model must be configured to use one of the fixed variance types.
1174
+ if model_pred.shape[1] == 6:
1175
+ model_pred, _ = torch.chunk(model_pred, 2, dim=1)
1176
+
1177
+ # Get the target for loss depending on the prediction type
1178
+ if noise_scheduler.config.prediction_type == "epsilon":
1179
+ target = noise
1180
+ elif noise_scheduler.config.prediction_type == "v_prediction":
1181
+ target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1182
+ else:
1183
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1184
+
1185
+ if args.with_prior_preservation:
1186
+ # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1187
+ model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1188
+ target, target_prior = torch.chunk(target, 2, dim=0)
1189
+
1190
+ # Compute instance loss
1191
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1192
+
1193
+ # Compute prior loss
1194
+ prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
1195
+
1196
+ # Add the prior loss to the instance loss.
1197
+ loss = loss + args.prior_loss_weight * prior_loss
1198
+ else:
1199
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1200
+
1201
+ accelerator.backward(loss)
1202
+ if accelerator.sync_gradients:
1203
+ params_to_clip = (
1204
+ itertools.chain(unet_lora_layers.parameters(), text_encoder_lora_layers.parameters())
1205
+ if args.train_text_encoder
1206
+ else unet_lora_layers.parameters()
1207
+ )
1208
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1209
+ optimizer.step()
1210
+ lr_scheduler.step()
1211
+ optimizer.zero_grad()
1212
+
1213
+ # Checks if the accelerator has performed an optimization step behind the scenes
1214
+ if accelerator.sync_gradients:
1215
+ progress_bar.update(1)
1216
+ global_step += 1
1217
+
1218
+ if accelerator.is_main_process:
1219
+ if global_step % args.checkpointing_steps == 0:
1220
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1221
+ accelerator.save_state(save_path)
1222
+ logger.info(f"Saved state to {save_path}")
1223
+
1224
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1225
+ progress_bar.set_postfix(**logs)
1226
+ accelerator.log(logs, step=global_step)
1227
+
1228
+ if global_step >= args.max_train_steps:
1229
+ break
1230
+
1231
+ if accelerator.is_main_process:
1232
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1233
+ logger.info(
1234
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1235
+ f" {args.validation_prompt}."
1236
+ )
1237
+ # create pipeline
1238
+ pipeline = DiffusionPipeline.from_pretrained(
1239
+ args.pretrained_model_name_or_path,
1240
+ unet=accelerator.unwrap_model(unet),
1241
+ text_encoder=None if args.pre_compute_text_embeddings else accelerator.unwrap_model(text_encoder),
1242
+ revision=args.revision,
1243
+ torch_dtype=weight_dtype,
1244
+ )
1245
+
1246
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1247
+ scheduler_args = {}
1248
+
1249
+ if "variance_type" in pipeline.scheduler.config:
1250
+ variance_type = pipeline.scheduler.config.variance_type
1251
+
1252
+ if variance_type in ["learned", "learned_range"]:
1253
+ variance_type = "fixed_small"
1254
+
1255
+ scheduler_args["variance_type"] = variance_type
1256
+
1257
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
1258
+ pipeline.scheduler.config, **scheduler_args
1259
+ )
1260
+
1261
+ pipeline = pipeline.to(accelerator.device)
1262
+ pipeline.set_progress_bar_config(disable=True)
1263
+
1264
+ # run inference
1265
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
1266
+ if args.pre_compute_text_embeddings:
1267
+ pipeline_args = {
1268
+ "prompt_embeds": validation_prompt_encoder_hidden_states,
1269
+ "negative_prompt_embeds": validation_prompt_negative_prompt_embeds,
1270
+ }
1271
+ else:
1272
+ pipeline_args = {"prompt": args.validation_prompt}
1273
+
1274
+ if args.validation_images is None:
1275
+ images = [
1276
+ pipeline(**pipeline_args, generator=generator).images[0]
1277
+ for _ in range(args.num_validation_images)
1278
+ ]
1279
+ else:
1280
+ images = []
1281
+ for image in args.validation_images:
1282
+ image = Image.open(image)
1283
+ image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
1284
+ images.append(image)
1285
+
1286
+ for tracker in accelerator.trackers:
1287
+ if tracker.name == "tensorboard":
1288
+ np_images = np.stack([np.asarray(img) for img in images])
1289
+ tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
1290
+ if tracker.name == "wandb":
1291
+ tracker.log(
1292
+ {
1293
+ "validation": [
1294
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1295
+ for i, image in enumerate(images)
1296
+ ]
1297
+ }
1298
+ )
1299
+
1300
+ del pipeline
1301
+ torch.cuda.empty_cache()
1302
+
1303
+ # Save the lora layers
1304
+ accelerator.wait_for_everyone()
1305
+ if accelerator.is_main_process:
1306
+ unet = unet.to(torch.float32)
1307
+ unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)
1308
+
1309
+ if text_encoder is not None:
1310
+ text_encoder = text_encoder.to(torch.float32)
1311
+ text_encoder_lora_layers = accelerator.unwrap_model(text_encoder_lora_layers)
1312
+
1313
+ LoraLoaderMixin.save_lora_weights(
1314
+ save_directory=args.output_dir,
1315
+ unet_lora_layers=unet_lora_layers,
1316
+ text_encoder_lora_layers=text_encoder_lora_layers,
1317
+ )
1318
+
1319
+ accelerator.end_training()
1320
+
1321
+
1322
+ if __name__ == "__main__":
1323
+ args = parse_args()
1324
+ main(args)
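
Note: the training loop added above is the standard denoising-diffusion objective with optional DreamBooth prior preservation: latents are noised at a randomly sampled timestep, the LoRA-augmented UNet predicts the added noise (or velocity), and the loss is a plain MSE, plus a weighted MSE term on the class-prior half of the batch. Only the injected LoRA layers (and, optionally, the text-encoder LoRA layers) receive gradients, which is why the saved artifact is only a few megabytes. A minimal sketch of that objective, using illustrative names (unet, scheduler, latents, text_emb, prior_loss_weight) rather than the script's exact variables:

    import torch
    import torch.nn.functional as F

    def diffusion_lora_loss(unet, scheduler, latents, text_emb, prior_loss_weight=1.0):
        # Forward diffusion: add noise at a random timestep (epsilon-prediction objective).
        noise = torch.randn_like(latents)
        t = torch.randint(
            0, scheduler.config.num_train_timesteps, (latents.shape[0],), device=latents.device
        ).long()
        noisy = scheduler.add_noise(latents, noise, t)
        pred = unet(noisy, t, encoder_hidden_states=text_emb).sample

        # With prior preservation, instance and class-prior examples are stacked along the batch dim.
        pred_inst, pred_prior = torch.chunk(pred, 2, dim=0)
        noise_inst, noise_prior = torch.chunk(noise, 2, dim=0)
        loss = F.mse_loss(pred_inst.float(), noise_inst.float(), reduction="mean")
        return loss + prior_loss_weight * F.mse_loss(pred_prior.float(), noise_prior.float(), reduction="mean")
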
lora/train_lora.sh ADDED
@@ -0,0 +1,21 @@
+ export SAMPLE_DIR="lora/samples/sculpture"
+ export OUTPUT_DIR="lora/lora_ckpt/sculpture_lora"
+
+ export MODEL_NAME="runwayml/stable-diffusion-v1-5"
+ export LORA_RANK=16
+
+ accelerate launch lora/train_dreambooth_lora.py \
+   --pretrained_model_name_or_path=$MODEL_NAME \
+   --instance_data_dir=$SAMPLE_DIR \
+   --output_dir=$OUTPUT_DIR \
+   --instance_prompt="a photo of a sculpture" \
+   --resolution=512 \
+   --train_batch_size=1 \
+   --gradient_accumulation_steps=1 \
+   --checkpointing_steps=100 \
+   --learning_rate=2e-4 \
+   --lr_scheduler="constant" \
+   --lr_warmup_steps=0 \
+   --max_train_steps=200 \
+   --lora_rank=$LORA_RANK \
+   --seed="0"
lora_tmp/pytorch_lora_weights.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a91eca307a7df4b4af0d73f52f0cbf8ac8693da50388f28271fb33a4fcdd6df7
+ size 12855259
release-doc/asset/accelerate_config.jpg ADDED
release-doc/asset/github_video.gif ADDED

Git LFS Details

  • SHA256: d87b873576337e4066094050203b4d53d1aef728db7979b0f16a0ae2518ea705
  • Pointer size: 132 Bytes
  • Size of remote file: 7.62 MB
release-doc/licenses/LICENSE-lora.txt ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
results/2023-12-01-2318-20.png ADDED
results/2023-12-01-2319-14.png ADDED
results/2023-12-01-2320-47.png ADDED
results/2023-12-01-2321-38.png ADDED
results/2023-12-01-2322-25.png ADDED
results/2023-12-01-2324-23.png ADDED
results/2023-12-01-2326-06.png ADDED
results/2023-12-01-2328-23.png ADDED
results/2023-12-01-2329-06.png ADDED
results/2023-12-01-2330-14.png ADDED
results/2023-12-01-2331-09.png ADDED
results/2023-12-01-2331-41.png ADDED
results/2023-12-01-2332-17.png ADDED
results/2023-12-01-2336-40.png ADDED
results/2023-12-01-2338-51.png ADDED

Git LFS Details

  • SHA256: 5af31b413c421167798e03ee03722bacaea27ad37bc9334cf8470ca219e26693
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
results/2023-12-01-2340-40.png ADDED

Git LFS Details

  • SHA256: eb5a56e0eb3c48d3f158523678893c0ccba693cbdaecd049e5d34aea51845a60
  • Pointer size: 132 Bytes
  • Size of remote file: 1.11 MB
results/2023-12-01-2342-40.png ADDED
results/2023-12-01-2349-09.png ADDED

Git LFS Details

  • SHA256: 12c80e3f8ab5f34f5bcd694692690d4ff9c95a5ddbdad898aeaef57bcc6afb3b
  • Pointer size: 132 Bytes
  • Size of remote file: 1.13 MB
results/2023-12-01-2350-12.png ADDED

Git LFS Details

  • SHA256: e753d9d6a79287979c0dd57ecc5393c854d550f853c6015283f6dfcd09c2e2a9
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
results/2023-12-01-2353-51.png ADDED

Git LFS Details

  • SHA256: eaec82d0bd8036687a7086ad5d21ad06716e78dfc2fc1a1b6344f87962919472
  • Pointer size: 132 Bytes
  • Size of remote file: 1.17 MB
results/2023-12-01-2355-54.png ADDED

Git LFS Details

  • SHA256: 24426fc3aacea198e1877bde2d2606431150115cace81abb11378a285eb2be48
  • Pointer size: 132 Bytes
  • Size of remote file: 1.16 MB
results/2023-12-01-2357-39.png ADDED

Git LFS Details

  • SHA256: e827e996b40e369f8ff852b4fdfdac7399431536b47b5416b3258c7251c0b353
  • Pointer size: 132 Bytes
  • Size of remote file: 1 MB
results/2023-12-02-0000-23.png ADDED

Git LFS Details

  • SHA256: ed2fbfae67c236ff1f8439d93f0caefe401b546e5f8c405babbeedeb1d437d92
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
results/2023-12-02-0002-02.png ADDED

Git LFS Details

  • SHA256: 864333336c74a2148ab2e77d3fffd23a61548544cf1186b1501914f43077dd14
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
results/2023-12-02-0004-46.png ADDED
results/2023-12-05-1935-28.png ADDED

Git LFS Details

  • SHA256: 8a9ce897a62299e552480abdb8f24fd83fbc6aec3a7a3e4a03d7c359f557e572
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
results/2023-12-05-1936-51.png ADDED

Git LFS Details

  • SHA256: ee81144d27c60be9e1e0f1057173339f526d71eeac94f13db704300ace3f9f8a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
results/2023-12-05-1937-52.png ADDED

Git LFS Details

  • SHA256: 8fc172ba5e6840f5dda2e18ae6d44e8216fc1799dc31b66e6111b8f3cfa9a2e2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
results/2023-12-05-1939-28.png ADDED

Git LFS Details

  • SHA256: 88f16c0149568a9034ffec78834b180ee488eb26a8bbd2daa5e7199a5055c0d6
  • Pointer size: 132 Bytes
  • Size of remote file: 1.37 MB
results/2023-12-05-1944-37.png ADDED