Harvey-J committed
Commit d9e8b4a
1 Parent(s): 6f7ea0c

Upload 34 files
Fooocus-API/.dockerignore ADDED
@@ -0,0 +1,53 @@
__pycache__
.DS_Store
*.ckpt
*.safetensors
*.pth
*.pt
*.bin
*.patch
*.backup
*.corrupted
sorted_styles.json
/language/default.json
lena.png
lena_result.png
lena_test.py
config.txt
config_modification_tutorial.txt
user_path_config.txt
user_path_config-deprecated.txt
build_chb.py
experiment.py
/modules/*.png
/repositories
/venv
/tmp
/ui-config.json
/outputs
/config.json
/log
/webui.settings.bat
/embeddings
/styles.csv
/params.txt
/styles.csv.bak
/webui-user.bat
/webui-user.sh
/interrogate
/user.css
/.idea
/notification.ogg
/notification.mp3
/SwinIR
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt
/cache.json*
/config_states/
/node_modules
/package-lock.json
/.coverage*
/auth.json
Fooocus-API/.github/workflows/docker-image.yml ADDED
@@ -0,0 +1,32 @@
name: Docker Image CI

on:
  push:
    tags:
      - v*

jobs:

  build:

    runs-on: ubuntu-latest

    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          push: true
          tags: konieshadow/fooocus-api:latest,konieshadow/fooocus-api:${{ github.ref_name }}
Fooocus-API/.gitignore ADDED
@@ -0,0 +1,56 @@
__pycache__
.DS_Store
*.ckpt
*.safetensors
*.pth
*.pt
*.bin
*.patch
*.backup
*.corrupted
sorted_styles.json
/language/default.json
lena.png
lena_result.png
lena_test.py
config.txt
config_modification_tutorial.txt
user_path_config.txt
user_path_config-deprecated.txt
build_chb.py
experiment.py
/modules/*.png
/repositories
/venv
/tmp
/ui-config.json
/outputs
/config.json
/log
/webui.settings.bat
/embeddings
/styles.csv
/params.txt
/styles.csv.bak
/webui-user.bat
/webui-user.sh
/interrogate
/user.css
/.idea
/notification.ogg
/notification.mp3
/SwinIR
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt
/cache.json*
/config_states/
/node_modules
/package-lock.json
/.coverage*
/auth.json
.cog/
/presets
*.db
Fooocus-API/Dockerfile ADDED
@@ -0,0 +1,21 @@
FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04

ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai

RUN apt-get update && \
    apt-get install --no-install-recommends -y python3 python3-pip python3-virtualenv && \
    apt-get install --no-install-recommends -y libopencv-dev python3-opencv && \
    rm -rf /var/lib/apt/lists/*

ENV VIRTUAL_ENV=/opt/venv
RUN virtualenv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

RUN pip install packaging

WORKDIR /app

COPY . /app/

CMD python3 main.py --host 0.0.0.0 --port 8888
Fooocus-API/LICENSE ADDED
@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

[This file contains the complete, unmodified text of the GNU General Public License v3; the canonical text is available at <https://www.gnu.org/licenses/gpl-3.0.txt>.]
Fooocus-API/README.md ADDED
@@ -0,0 +1,234 @@
[![Docker Image CI](https://github.com/konieshadow/Fooocus-API/actions/workflows/docker-image.yml/badge.svg?branch=main)](https://github.com/konieshadow/Fooocus-API/actions/workflows/docker-image.yml)

[ English | [中文](/README_zh.md) ]

- [Introduction](#introduction)
  - [Fooocus](#fooocus)
  - [Fooocus-API](#fooocus-api)
- [Getting started](#getting-started)
  - [Run with Replicate](#run-with-replicate)
  - [Self-hosted](#self-hosted)
    - [conda](#conda)
    - [venv](#venv)
    - [Pre-download and install](#pre-download-and-install)
    - [Use an existing Fooocus installation](#use-an-existing-fooocus-installation)
  - [Start with Docker](#start-with-docker)
- [Command-line flags](#command-line-flags)
- [Changelog](#changelog)
- [APIs](#apis)
- [License](#license)
- [Thanks :purple\_heart:](#thanks-purple_heart)

# Introduction

A FastAPI-powered REST API for [Fooocus](https://github.com/lllyasviel/Fooocus).

Currently loaded Fooocus version: [2.1.860](https://github.com/lllyasviel/Fooocus/blob/main/update_log.md).

## Fooocus

This section is from the [Fooocus](https://github.com/lllyasviel/Fooocus) project.

Fooocus is an image-generating software package (based on [Gradio](https://www.gradio.app/)).

Fooocus is a rethinking of the designs of Stable Diffusion and Midjourney:

- Learned from Stable Diffusion: the software is offline, open source, and free.

- Learned from Midjourney: no manual tweaking is needed; users only need to focus on the prompts and images.

Fooocus includes and automates many internal optimizations and quality improvements. Users can forget all the difficult technical parameters and simply enjoy the interaction between human and computer to "explore new mediums of thought and expanding the imaginative powers of the human species".

## Fooocus-API

You may have tried to use the [Gradio client](https://www.gradio.app/docs/client) to call Fooocus; that was a terrible experience for me.

Fooocus-API uses [FastAPI](https://fastapi.tiangolo.com/) to provide a `REST` API for Fooocus, so you can use Fooocus's powerful abilities from any language you like.

In addition, we also provide detailed [documentation](/docs/api_doc_en.md) and [sample code](/examples).
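
For instance, a text-to-image call from plain Python might look like the sketch below. The endpoint path and payload fields (`/v1/generation/text-to-image`, `require_base64`) are assumptions to be checked against the linked API documentation; treat this as a starting point, not a full client.

```python
# Minimal sketch of a text-to-image request, assuming the endpoint and
# field names match /docs/api_doc_en.md; adjust to the documented schema.
import base64

import requests

def text_to_image(prompt: str, host: str = "http://127.0.0.1:8888") -> bytes:
    """Request one image for `prompt` and return it as raw image bytes."""
    response = requests.post(
        f"{host}/v1/generation/text-to-image",
        json={
            "prompt": prompt,
            "require_base64": True,  # ask the server to inline the image
        },
        timeout=600,  # first-run model loading can be slow
    )
    response.raise_for_status()
    first_result = response.json()[0]  # assumed: one entry per generated image
    return base64.b64decode(first_result["base64"])

if __name__ == "__main__":
    with open("output.png", "wb") as f:
        f.write(text_to_image("a forest cabin at dawn, morning fog"))
```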

# Getting started

## Run with Replicate

You can now use Fooocus-API through Replicate; the model is at [konieshadow/fooocus-api](https://replicate.com/konieshadow/fooocus-api).

With presets:

- [konieshadow/fooocus-api-anime](https://replicate.com/konieshadow/fooocus-api-anime)
- [konieshadow/fooocus-api-realistic](https://replicate.com/konieshadow/fooocus-api-realistic)

I believe this is the easiest way to generate images with Fooocus's power.

## Self-hosted

You need Python >= 3.10, or use conda to create a new environment.

The hardware requirements are those of Fooocus itself; you can find the details [here](https://github.com/lllyasviel/Fooocus#minimal-requirement).

### conda

You can easily start the app with conda:

```shell
conda env create -f environment.yaml
conda activate fooocus-api
```

Then run `python main.py` to start the app. By default, the server listens on `http://127.0.0.1:8888`.

> If you are running the project for the first time, you may have to wait a while, during which the program completes the rest of the installation and downloads the necessary models. You can also do these steps manually, as described below.

### venv

Similar to conda: create a virtual environment, then start the app and wait while it finishes installing and downloading.

```powershell
# windows
python -m venv venv
.\venv\Scripts\Activate
```

```shell
# linux
python -m venv venv
source venv/bin/activate
```

Then run `python main.py`.

### Pre-download and install

If you want to handle environment problems manually and download the models in advance, you can follow these steps.

After creating the environment with conda or venv, complete the rest of the setup manually as follows.

First, install the requirements: `pip install -r requirements.txt`

Then install PyTorch with CUDA: `pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu121`. You can find more about this [here](https://pytorch.org/get-started/previous-versions/).

> Note that for the PyTorch and CUDA versions, the combination recommended by Fooocus is used, currently PyTorch 2.1.0 + CUDA 12.1. If you insist, you can also use other versions, but you need to add `--skip-pip` when starting the app; otherwise the recommended versions will be installed automatically.

Next, make a directory named `repositories` and clone `https://github.com/lllyasviel/Fooocus` into it. You must use `git clone`, not a downloaded zip. If you have an existing Fooocus, see [here](#use-an-existing-fooocus-installation).

Last, download the models and put them into `repositories\Fooocus\models`.

Here is the list needed for startup (it may differ under different [startup params](#command-line-flags)):

- checkpoint: goes into `repositories\Fooocus\models\checkpoints`
  + [juggernautXL_version6Rundiffusion.safetensors](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors)

- vae_approx: goes into `repositories\Fooocus\models\vae_approx`
  + [xlvaeapp.pth](https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth)
  + [vaeapp_sd15.pth](https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt)
  + [xl-to-v1_interposer-v3.1.safetensors](https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors)

- lora: goes into `repositories\Fooocus\models\loras`
  + [sd_xl_offset_example-lora_1.0.safetensors](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/raw/main/sd_xl_offset_example-lora_1.0.safetensors)

> I've uploaded the models I'm using, which include almost all the base models Fooocus will use! I put them [here](https://www.123pan.com/s/dF5A-SIQsh.html), extraction code: `D4Mk`. A scripted download is sketched below.
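
If you would rather script those downloads, a sketch like the following mirrors the list above. The file names, URLs, and target sub-directories are copied from that list; only the `models_root` location is an assumption you should adjust to your checkout.

```python
# Sketch: fetch the startup models listed above into the folders Fooocus
# expects. URLs and relative paths are copied from the list; change
# models_root to match your actual layout.
from pathlib import Path
from urllib.request import urlretrieve

MODELS = {
    "checkpoints/juggernautXL_version6Rundiffusion.safetensors":
        "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors",
    "vae_approx/xlvaeapp.pth":
        "https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth",
    "vae_approx/vaeapp_sd15.pth":
        "https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt",
    "vae_approx/xl-to-v1_interposer-v3.1.safetensors":
        "https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors",
    # The list above links this file via /raw/; if that serves an LFS
    # pointer instead of the binary, switch /raw/ to /resolve/.
    "loras/sd_xl_offset_example-lora_1.0.safetensors":
        "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/raw/main/sd_xl_offset_example-lora_1.0.safetensors",
}

models_root = Path("repositories/Fooocus/models")
for relative_path, url in MODELS.items():
    target = models_root / relative_path
    target.parent.mkdir(parents=True, exist_ok=True)
    if not target.exists():
        print(f"downloading {url} -> {target}")
        urlretrieve(url, str(target))
```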

### Use an existing Fooocus installation

If you already have Fooocus installed and working well, the recommended way is to reuse its models: simply copy the `config.txt` file from your local Fooocus folder to Fooocus-API's root folder. See [Customization](https://github.com/lllyasviel/Fooocus#customization) for details.

This way you can have both Fooocus and Fooocus-API running at the same time; they operate independently and do not interfere with each other.

> It is not recommended to copy an existing Fooocus installation directly into the `repositories` directory. If you insist on doing this, make sure the Fooocus directory is a Git repository; otherwise the program will not start properly.

## Start with Docker

Before using Docker with a GPU, you should first [install the NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).

Run

```shell
docker run -d --gpus=all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -p 8888:8888 konieshadow/fooocus-api
```

For more complex usage:

```shell
mkdir ~/repositories
mkdir -p ~/.cache/pip

docker run -d --gpus=all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -v ~/repositories:/app/repositories \
    -v ~/.cache/pip:/root/.cache/pip \
    -p 8888:8888 konieshadow/fooocus-api
```

This persists the dependent repositories and the pip cache across container restarts.

You can add `-e PIP_INDEX_URL={pypi-mirror-url}` to the `docker run` command to change the pip index URL.

# Command-line flags

- `-h, --help` show this help message and exit
- `--port PORT` Set the listening port, default: 8888
- `--host HOST` Set the listening host, default: 127.0.0.1
- `--base-url BASE_URL` Set the base URL for external visits; the default is http://host:port
- `--log-level LOG_LEVEL` Log level for Uvicorn, default: info
- `--sync-repo SYNC_REPO` Sync dependent git repositories to local; 'skip' skips the sync action, 'only' only does the sync and does not launch the app
- `--skip-pip` Skip automatic pip install during setup
- `--preload-pipeline` Preload the pipeline before starting the HTTP server
- `--queue-size QUEUE_SIZE` Working queue size, default: 3; generation requests exceeding the working queue size will return failure
- `--queue-history QUEUE_HISTORY` Number of finished jobs to keep; tasks exceeding the limit will be deleted, including output image files; default: 0, meaning no limit
- `--webhook-url WEBHOOK_URL` Webhook URL for notifying generation results, default: None
- `--presistent` Store history to the database

Since v0.3.25, Fooocus's own CMD flags are also supported; you can pass any argument that Fooocus supports.

For example, to speed up image generation (needs more VRAM):

```
python main.py --all-in-fp16 --always-gpu
```

For Fooocus CMD flags, see [here](https://github.com/lllyasviel/Fooocus?tab=readme-ov-file#all-cmd-flags).

# Changelog

**[24/01/10] v0.3.29**: Support for storing history to the database.

**[24/01/09] v0.3.29**: Image Prompt Mixing requirements implemented; with this you can send image prompts and perform inpainting or upscaling in a single request.

**[24/01/04] v0.3.29**: Merged Fooocus v2.1.860.

**[24/01/03] v0.3.28**: Added the text-to-image-with-ip interface.

**[23/12/29] v0.3.27**: Added the describe interface; now you can get a prompt from an image.

**[23/12/29] v0.3.27**: Added the query job history API. Added webhook_url support for each generation request.

**[23/12/28] v0.3.26**: **Breaking change**: Added a webhook cmd flag for notifying generation results. Changed async job ids to UUIDs to avoid conflicts between startups.

**[23/12/22] v0.3.25**: Added CMD flags support of Fooocus. **Breaking change**: Removed the CLI argument `disable-private-log`. You can use Fooocus's `--disable-image-log` for the same purpose.

**[23/12/19] v0.3.24**: Merged Fooocus v2.1.852. This version includes Fooocus v2.1.839, which contains a seed breaking change. Details in [2.1.839](https://github.com/lllyasviel/Fooocus/blob/main/update_log.md#21839).

**[23/12/14] v0.3.23**: Merged Fooocus v2.1.837.

**[23/11/30] v0.3.22**: Added custom upscale support. You can pass the `upscale_value` param to the upscale API to override the upscale value.

**[23/11/28] v0.3.21**: Added custom size support for outpaint; thanks to [freek99](https://github.com/freek99). Output files are now deleted when the task queue history limit is exceeded. Removed restrictions on input resolution: you can now use any `width*height` combination for `aspect_ratios_selection`. Changed the type of the `seed` field in generation results to String to avoid numerical overflow.

Older change history can be found on the [release page](https://github.com/konieshadow/Fooocus-API/releases).

# APIs

You can find all API details [here](/docs/api_doc_en.md).
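
To illustrate the async flow mentioned in the changelog (job ids, the query-job API), a polling client could look like the sketch below. The `async_process`, `job_id`, and `job_stage` names are assumptions; verify them against the API document above.

```python
# Sketch of the async flow: submit a job, then poll until it completes.
# Endpoint paths and field names (async_process, job_id, job_stage) are
# assumptions; verify them against /docs/api_doc_en.md.
import time

import requests

HOST = "http://127.0.0.1:8888"

# Submit without blocking; the server responds immediately with a job id.
job = requests.post(
    f"{HOST}/v1/generation/text-to-image",
    json={"prompt": "a lighthouse in a storm", "async_process": True},
    timeout=30,
).json()

# Poll the queue until the job leaves the waiting/running stages.
while True:
    status = requests.get(
        f"{HOST}/v1/generation/query-job",
        params={"job_id": job["job_id"]},
        timeout=30,
    ).json()
    if status.get("job_stage") in ("SUCCESS", "ERROR"):
        break
    time.sleep(2)

print(status)
```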

# License

# Thanks :purple_heart:

Thanks for all your contributions and efforts towards improving the Fooocus API. We thank you for being part of our :sparkles: community :sparkles:!
Fooocus-API/README_zh.md ADDED
@@ -0,0 +1,234 @@
1
+ [![Docker Image CI](https://github.com/konieshadow/Fooocus-API/actions/workflows/docker-image.yml/badge.svg?branch=main)](https://github.com/konieshadow/Fooocus-API/actions/workflows/docker-image.yml)
2
+
3
+ [ [English](/README.md) | 中文 ]
4
+
5
+ - [简介](#简介)
6
+ - [Fooocus](#fooocus)
7
+ - [Fooocus-API](#fooocus-api)
8
+ - [开始](#开始)
9
+ - [在 Replicate 上运行](#在-replicate-上运行)
10
+ - [自托管](#自托管)
11
+ - [conda](#conda)
12
+ - [venv](#venv)
13
+ - [预下载及安装](#预下载及安装)
14
+ - [已经有安装好的 Fooocus](#已经有安装好的-fooocus)
15
+ - [使用Docker启动](#使用docker启动)
16
+ - [命令行参数](#命令行参数)
17
+ - [更新日志](#更新日志)
18
+ - [Apis](#apis)
19
+ - [License](#license)
20
+ - [感谢 :purple\_heart:](#感谢-purple_heart)
21
+
22
+
23
+ # 简介
24
+
25
+ 使用 FastAPI 构建的 [Fooocus](https://github.com/lllyasviel/Fooocus) 的 API。
26
+
27
+ 当前支持的 Fooocus 版本: [2.1.860](https://github.com/lllyasviel/Fooocus/blob/main/update_log.md)。
28
+
29
+ ## Fooocus
30
+
31
+ 该部分出自 [Fooocus](https://github.com/lllyasviel/Fooocus) 项目。
32
+
33
+ Fooocus 是一个图像生成软件 (基于 [Gradio](https://www.gradio.app/))。
34
+
35
+ Fooocus 是对于 Stable Diffusion 和 Midjourney 的重新思考以及设计:
36
+
37
+ - 我们学习了 Stable Diffusion 的开源、免费、离线运行。
38
+
39
+ - 我们学习了 Midjourney 的专注,不需要手动调整,专注于描述词以及图像。
40
+
41
+ Fooocus 包含了许多内部优化以及质量改进。 忘记那些复杂困难的技术参数,享受人机交互带来的想象力的突破以及探索新的思维
42
+
43
+ ## Fooocus-API
44
+
45
+ 可能你已经尝试过使用 [Gradio client](https://www.gradio.app/docs/client) 来调用 Fooocus,对我来说可真是不咋地
46
+
47
+ Fooocus API 使用 [FastAPI](https://fastapi.tiangolo.com/) 构建了一系列 `REST` API 来使用 Fooocus。现在,你可以用任何你喜欢的编程语言来调用 Fooocus 的强大能力。
48
+
49
+ 此外,我们还提供了详细的 [文档](/docs/api_doc_zh.md) 和 [示例代码](/examples)
50
+
51
+ # 开始
52
+
53
+ ## 在 Replicate 上运行
54
+
55
+ 现在你可以在 Replicate 上使用 Fooocus-API,在这儿: [konieshadow/fooocus-api](https://replicate.com/konieshadow/fooocus-api).
56
+
57
+ 使用预先调整参数的:
58
+
59
+ - [konieshadow/fooocus-api-anime](https://replicate.com/konieshadow/fooocus-api-anime)
60
+ - [konieshadow/fooocus-api-realistic](https://replicate.com/konieshadow/fooocus-api-realistic)
61
+
62
+ 我认为这是体验 Fooocus 强大能力的更简单的方法
63
+
64
+ ## 自托管
65
+
66
+ 需要 Python >= 3.10,或者使用 conda、venv 创建一个新的环境
67
+
68
+ 硬件需求来源于 Fooocus。 详细要求可以看[这里](https://github.com/lllyasviel/Fooocus#minimal-requirement)
69
+
70
+ ### conda
71
+
72
+ 按照下面的步骤启动一个 app:
73
+
74
+ ```shell
75
+ conda env create -f environment.yaml
76
+ conda activate fooocus-api
77
+ ```
78
+
79
+ 然后,执行 `python main.py` 启动 app ,默认情况下会监听在 `http://127.0.0.1:8888`
80
+
81
+ > 如果是第一次运行,程序会自动处理完成剩余的环境配置、模型下载等工作,因此会等待一段时间。也可以预先配置好环境、下载模型,后面会提到。
82
+
83
+ ### venv
84
+
85
+ 和使用 conda 差不多,创建虚拟环境,启动 app ,等待程序完成环境安装、模型下载
86
+
87
+ ```powershell
88
+ # windows
89
+ python -m venv venv
90
+ .\venv\Scripts\Activate
91
+ ```
92
+
93
+ ```shell
94
+ # linux
95
+ python -m venv venv
96
+ source venv/bin/activate
97
+ ```
98
+ 然后执行 `python main.py`
99
+
100
+ ### 预下载及安装
101
+
102
+ 如果想要手动配置环境以及放置模型,可以参考下面的步骤
103
+
104
+ 在创建完 conda 或者 venv 环境之后,按照下面的步骤手动配置环境、下载模型
105
+
106
+ 首先,安装 requirements: `pip install -r requirements.txt`
107
+
108
+ 然后安装 pytorch+cuda: `pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu121` 更多安装信息在[这儿](https://pytorch.org/get-started/previous-versions/),
109
+
110
+ > 关于 pytorch 和 cuda 的版本,Fooocus API 使用的是 Fooocus 推荐的版本,目前是 pytorch2.1.0+cuda12.1。如果你是个"犟种"非要用其他版本,我测试过也是可以的,不过启动的时候记得加上 `--skip-pip`,否则程序会自动替换为推荐版本。
111
+
112
+ 然后创建一个名为 `repositories` 的目录,将 `https://github.com/lllyasviel/Fooocus` 克隆到其中。注意必须使用 `git clone`,`download zip`下载解压不包含Git信息,无法正常运行。如果你有一个已经安装完成的 Fooocus,查看[这里](#已经有安装好的-fooocus)
113
+
114
+ 最后,把下载的模型放到这个目录 `repositories\Fooocus\models`
115
+
116
+ 这里是一个启动必须下载的模型列表 (也可能不一样如果 [启动参数](#命令行参数) 不同的话):
117
+
118
+ - checkpoint: 放到 `repositories\Fooocus\models\checkpoints`
119
+ + [juggernautXL_version6Rundiffusion.safetensors](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors)
120
+
121
+ - vae_approx: 放到 `repositories\Fooocus\models\vae_approx`
122
+ + [xlvaeapp.pth](https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth)
123
+ + [vaeapp_sd15.pth](https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt)
124
+ + [xl-to-v1_interposer-v3.1.safetensors](https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors)
125
+
126
+ - lora: 放到 `repositories\Fooocus\models\loras`
127
+ + [sd_xl_offset_example-lora_1.0.safetensors](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/raw/main/sd_xl_offset_example-lora_1.0.safetensors)
128
+
129
+ > 国内不好下载的, 可以到 [这儿](https://www.123pan.com/s/dF5A-SIQsh.html) 下载, 提取码: `D4Mk`
130
+
131
+ ### 已经有安装好的 Fooocus
132
+
133
+ 如果你已经有一个安装好的且运行正常的 Fooocus, 推荐的方式是复用模型, 只需要将 Fooocus 根目录下的 `config.txt` 文件复制到 Fooocus API 的根目录即可。 查看 [Customization](https://github.com/lllyasviel/Fooocus#customization) 获取更多细节.
134
+
135
+ 使用这种方法 Fooocus 和 Fooocus API 会同时存在,独立运行互不干扰。
136
+
137
+ > 除非你能确保已安装的 Fooocus 目录是一个 Git 仓库,否则不推荐直接将其复制到 repositories 目录。
138
+
139
+ ## 使用Docker启动
140
+
141
+ 开始之前,先安装 [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html),这是 Docker 可以使用 GPU 的前提。
142
+
143
+ 运行
144
+
145
+ ```shell
146
+ docker run -d --gpus=all \
147
+ -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \
148
+ -e NVIDIA_VISIBLE_DEVICES=all \
149
+ -p 8888:8888 konieshadow/fooocus-api
150
+ ```
151
+
152
+ 一个更实用的例子:
153
+
154
+ ```shell
155
+ mkdir ~/repositories
156
+ mkdir -p ~/.cache/pip
157
+
158
+ docker run -d --gpus=all \
159
+ -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \
160
+ -e NVIDIA_VISIBLE_DEVICES=all \
161
+ -v ~/repositories:/app/repositories \
162
+ -v ~/.cache/pip:/root/.cache/pip \
163
+ -p 8888:8888 konieshadow/fooocus-api
164
+ ```
165
+
166
+ 这里把 `repositories` 和 `pip cache` 映射到了本地
167
+
168
+ 你还可以添加 `-e PIP_INDEX_URL={pypi-mirror-url}` 选项来更换 pip 源
169
+
170
+ # 命令行参数
171
+
172
+ - `-h, --help` 显示本帮助并退出
173
+ - `--port PORT` 设置监听端口,默认:8888
174
+ - `--host HOST` 设置监听地址,默认:127.0.0.1
175
+ - `--base-url BASE_URL` 设置返回结果中的地址,默认是: http://host:port
176
+ - `--log-level LOG_LEVEL` Uvicorn 中的日志等级,默认:info
177
+ - `--sync-repo SYNC_REPO` 同步 Fooocus 仓库到本地,`skip` 用于在启动时跳过同步,`only` 只同步不启动程序
178
+ - `--skip-pip` 跳过启动时的 pip 安装
179
+ - `--preload-pipeline` 启动 http server 之前加载 pipeline
180
+ - `--queue-size QUEUE_SIZE` 工作队列大小,默认是 3 ,超过队列的请求会返回失败
181
+ - `--queue-history QUEUE_HISTORY` 保留的作业历史,默认 0 即无限制,超过会被删除,包括生成的图像
182
+ - `--webhook-url WEBHOOK_URL` 通知生成结果的 webhook 地址,默认为 None
183
+ - `--presistent` 持久化历史记录到SQLite数据库,默认关闭
184
+
185
+ 从 v0.3.25 开始, Fooocus 的命令行选项也被支持,你可以在启动时加上 Fooocus 支持的选项
186
+
187
+ 比如(需要更大的显存):
188
+
189
+ ```
190
+ python main.py --all-in-fp16 --always-gpu
191
+ ```
192
+
193
+ 完整的 Fooocus 命令行选项可以在[这儿](https://github.com/lllyasviel/Fooocus?tab=readme-ov-file#all-cmd-flags)找到。
194
+
195
+
196
+ # 更新日志
197
+
198
+ **[24/01/10] v0.3.29** : 支持将历史生成数据持久化到数据库,并且支持从数据库中读取历史数据
199
+
200
+ **[24/01/09] v0.3.29** : Implemented Image Prompt Mixing. With this, you can send image prompts and perform inpainting or upscaling in a single request.
201
+
202
+ **[24/01/04] v0.3.29** : 合并了 Fooocus v2.1.860
203
+
204
+ **[24/01/03] v0.3.28** : 增加 text-to-image-with-ip 接口
205
+
206
+ **[23/12/29] v0.3.27** : 增加 describe 接口,现在你可以使用图像反推提示词了
207
+
208
+ **[23/12/29] v0.3.27** : 增加查询历史 API。增加 webhook_url 对所有请求的支持
209
+
210
+ **[23/12/28] v0.3.26** : **重大变更**: 添加 webhook 选项以支持生成完毕后的事件通知。将 async 的任务 ID 由数字改为 UUID 来避免应用重启后造成的混乱
211
+
212
+ **[23/12/22] v0.3.25** : 增加对 Fooocus 命令行选项的支持 **重大变更**: 移除`disable-private-log` 选项,你可以使用 Fooocus 原生的 `--disable-image-log` 来达到同样的效果
213
+
214
+ **[23/12/19] v0.3.24** : 该版本合并了 Fooocus v2.1.839, 包含一个对于 seed 的重大变更,详情参考:[2.1.839](https://github.com/lllyasviel/Fooocus/blob/main/update_log.md#21839).
215
+
216
+ **[23/12/14] v0.3.23** : 合并 Fooocus v2.1.837.
217
+
218
+ **[23/11/30] v0.3.22** : 支持自定义 upscale, 通过传递 `upscale_value` 给 upscale api 来重写 upscale 值
219
+
220
+ **[23/11/28] v0.3.21** : 增加 outpaint 自定义大小,感谢 [freek99](https://github.com/freek99) 提供的代码。当超出队列历史限制时,删除生成的图像。删除对输入分辨率的限制,现在你可以通过任意的 `width*height` 组合来指定 `aspect_ratios_selection`。将生成结果中 `seed` 字段的类型更改为字符串,以避免数字溢出。
221
+
222
+ 更早的日志可以在 [release page](https://github.com/konieshadow/Fooocus-API/releases) 找到
223
+
224
+
225
+ # Apis
226
+
227
+ 你可以在[这里](/docs/api_doc_zh.md)找到所有的 API 细节
228
+
229
+ # License
230
+
231
+
232
+ # 感谢 :purple_heart:
233
+
234
+ 感谢所有为改进 Fooocus API 做出贡献和努力的人。再次感谢 :sparkles: 社区万岁 :sparkles:!
Fooocus-API/cog.yaml ADDED
@@ -0,0 +1,44 @@
1
+ # Configuration for Cog ⚙️
2
+ # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md
3
+
4
+ build:
5
+ # set to true if your model requires a GPU
6
+ gpu: true
7
+ cuda: "12.1"
8
+
9
+ # a list of ubuntu apt packages to install
10
+ system_packages:
11
+ - "libgl1-mesa-glx"
12
+ - "libglib2.0-0"
13
+
14
+ # python version in the form '3.11' or '3.11.4'
15
+ python_version: "3.11"
16
+
17
+ # a list of packages in the format <package-name>==<version>
18
+ python_packages:
19
+ - "torchsde==0.2.5"
20
+ - "einops==0.4.1"
21
+ - "transformers==4.30.2"
22
+ - "safetensors==0.3.1"
23
+ - "accelerate==0.21.0"
24
+ - "pyyaml==6.0"
25
+ - "Pillow==9.2.0"
26
+ - "scipy==1.9.3"
27
+ - "tqdm==4.64.1"
28
+ - "psutil==5.9.5"
29
+ - "pytorch_lightning==1.9.4"
30
+ - "omegaconf==2.2.3"
31
+ - "pygit2==1.12.2"
32
+ - "opencv-contrib-python==4.8.0.74"
33
+ - "torch==2.1.0"
34
+ - "torchvision==0.16.0"
35
+
36
+ # commands run after the environment is setup
37
+ # run:
38
+ # - "echo env is ready!"
39
+ # - "echo another command if needed"
40
+
41
+ image: "r8.im/konieshadow/fooocus-api"
42
+
43
+ # predict.py defines how predictions are run on your model
44
+ predict: "predict.py:Predictor"
Fooocus-API/docs/api_doc_en.md ADDED
@@ -0,0 +1,971 @@
1
+ - [Introduction](#introduction)
2
+ - [Fooocus capability related interfaces](#fooocus-capability-related-interfaces)
3
+ - [text-to-image](#text-to-image)
4
+ - [image-upscale-vary](#image-upscale-vary)
5
+ - [image-inpaint-outpaint](#image-inpaint-outpaint)
6
+ - [image-prompt](#image-prompt)
7
+ - [text-to-image-with-imageprompt](#text-to-image-with-imageprompt)
8
+ - [describe](#describe)
9
+ - [all-models](#all-models)
10
+ - [refresh-models](#refresh-models)
11
+ - [styles](#styles)
12
+ - [Fooocus API task related interfaces](#fooocus-api-task-related-interfaces)
13
+ - [job-queue](#job-queue)
14
+ - [query-job](#query-job)
15
+ - [job-history](#job-history)
16
+ - [stop](#stop)
17
+ - [ping](#ping)
18
+ - [webhook](#webhook)
19
+ - [public requests body](#public-requests-params)
20
+ - [AdvanceParams](#advanceparams)
21
+ - [lora](#lora)
22
+ - [response](#response)
23
+
24
+
25
+
26
+ # Introduction
27
+
28
+ Fooocus API now provides more than a dozen REST interfaces. I roughly divide them into two categories: the first calls Fooocus capabilities, such as generating images and refreshing models; the second relates to the Fooocus API itself, mainly task queries. I will try to explain their role and usage and provide examples in the following content.
29
+
30
+ > Almost all interface parameters have default values, which means you only need to send the parameters you are interested in. The complete parameters and default values can be viewed in the table.
31
+
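+ The Python snippets in this document are sketches rather than a full client. They assume the server is running at the default address and that the following imports are in scope (the originals omit them):
+
+ ```python
+ import base64
+ import json
+
+ import requests
+ ```
+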
32
+ # Fooocus capability related interfaces
33
+
34
+ ## text-to-image
35
+
36
+ Corresponds to the text-to-image function in Fooocus
37
+
38
+ **base info:**
39
+
40
+ ```yaml
41
+ EndPoint: /v1/generation/text-to-image
42
+ Method: Post
43
+ DataType: json
44
+ ```
45
+ **requests params:**
46
+
47
+ | Name | Type | Description |
48
+ | ---- | ---- | ----------- |
49
+ | prompt | string | prompt, default to empty string |
50
+ | negative_prompt | string | negative_prompt |
51
+ | style_selections | List[str] | list of styles; each must be a supported style, see all supported [styles](#styles) |
52
+ | performance_selection | Enum | performance selection, must be one of `Speed`, `Quality`, `Extreme Speed`, default to `Speed` |
53
+ | aspect_ratios_selection | str | resolution, default to `1152*896` |
54
+ | image_number | int | number of images to generate, default 1, max 32; note: not a parallel interface |
55
+ | image_seed | int | seed, default -1 (random) |
56
+ | sharpness | float | sharpness, default 2.0, range 0-30 |
57
+ | guidance_scale | float | guidance scale, default 4.0, range 1-30 |
58
+ | base_model_name | str | base model, default to `juggernautXL_version6Rundiffusion.safetensors` |
59
+ | refiner_model_name | str | refiner model, default to `None` |
60
+ | refiner_switch | float | refiner switch, default to 0.5 |
61
+ | loras | List[Lora] | lora list with configuration; Lora struct: [Lora](#lora) |
62
+ | advanced_params | AdvancedParams | advanced params, see [AdvancedParams](#advanceparams) |
63
+ | require_base64 | bool | require base64, default to False |
64
+ | async_process | bool | is async, default to False |
65
+ | webhook_url | str | after async task completed, address for callback, default to None, refer to [webhook](#webhook) |
66
+
67
+ **response params:**
68
+
69
+ Most responses share the same structure; parts that differ will be explained specifically
70
+
71
+ This interface returns a universal response structure, refer to [response](#response)
72
+
73
+ **request example:**
74
+
75
+ ```python
76
+ host = "http://127.0.0.1:8888"
77
+
78
+ def text2img(params: dict) -> dict:
79
+ """
80
+ text to image
81
+ """
82
+ result = requests.post(url=f"{host}/v1/generation/text-to-image",
83
+ data=json.dumps(params),
84
+ headers={"Content-Type": "application/json"})
85
+ return result.json()
86
+
87
+ result =text2img({
88
+ "prompt": "1girl sitting on the ground",
89
+ "async_process": True})
90
+ print(result)
91
+ ```
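+
+ Because the call above sets `async_process` to `True`, the response describes the queued job rather than the finished image. Below is a minimal polling sketch built on the [query-job](#query-job) interface documented later; `"SUCCESS"` appears in that section's example, while `"ERROR"` as a terminal stage is an assumption:
+
+ ```python
+ import time
+
+ def poll_job(job_id: str, interval: float = 2.0) -> dict:
+     """
+     Poll query-job until the async task reaches a terminal stage.
+     """
+     while True:
+         job = requests.get(url=f"{host}/v1/generation/query-job",
+                            params={"job_id": job_id},
+                            timeout=30).json()
+         if job["job_stage"] in ("SUCCESS", "ERROR"):  # "ERROR" is assumed
+             return job
+         time.sleep(interval)
+
+ job = poll_job(result["job_id"])
+ print(json.dumps(job["job_result"], indent=4, ensure_ascii=False))
+ ```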
92
+
93
+ ## image-upscale-vary
94
+
95
+ Corresponding to the function of Upscale or Variation in Fooocus
96
+
97
+ The request body for this interface is based on [text-to-image](#text-to-image), so only the differences from [text-to-image](#text-to-image) are listed below
98
+
99
+ In addition, this interface has two versions; there is no functional difference between them, only slight differences in the request method
100
+
101
+ **base info:**
102
+
103
+ ```yaml
104
+ EndPoint_V1: /v1/generation/image-upscale-vary
105
+ EndPoint_V2: /v2/generation/image-upscale-vary
106
+ Method: Post
107
+ DataType: form|json
108
+ ```
109
+
110
+ ### V1
111
+
112
+ **requests params**
113
+
114
+ | Name | Type | Description |
115
+ | ---- | ---- |---------------------------|
116
+ | input_image | string($binary) | binary image |
117
+ | uov_method | Enum | 'Vary (Subtle)','Vary (Strong)','Upscale (1.5x)','Upscale (2x)','Upscale (Fast 2x)','Upscale (Custom)' |
118
+ | upscale_value | float | default to None , 1.0-5.0, magnification, only for uov_method is 'Upscale (Custom)' |
119
+ | style_selections | List[str] | list of Fooocus styles, separated by commas |
120
+ | loras | str(List[Lora]) | lora list with configuration; Lora struct: [Lora](#lora), example: [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
121
+ | advanced_params | str(AdvancedParams) | advanced params, see [AdvancedParams](#advanceparams); sent as str, None is allowed |
122
+
123
+ **response params:**
124
+
125
+ This interface returns a universal response structure, refer to [response](#response)
126
+
127
+ **requests example:**
128
+
129
+ ```python
130
+ # headers should not contain {"Content-Type": "application/json"}
131
+
132
+ host = "http://127.0.0.1:8888"
133
+ image = open("./examples/imgs/bear.jpg", "rb").read()
134
+
135
+ def upscale_vary(image, params: dict) -> dict:
136
+ """
137
+ Upscale or Vary
138
+ """
139
+ response = requests.post(url=f"{host}/v1/generation/image-upscale-vary",
140
+ data=params,
141
+ files={"input_image": image})
142
+ return response.json()
143
+
144
+ result =upscale_vary(image=image,
145
+ params={
146
+ "uov_method": "Upscale (2x)",
147
+ "async_process": True
148
+ })
149
+ print(json.dumps(result, indent=4, ensure_ascii=False))
150
+ ```
151
+
152
+ ### V2
153
+
154
+ **requests params**
155
+
156
+ | Name | Type | Description |
157
+ | ---- | ---- |---------------------------------------------------------------------------------------------------------------------------------------|
158
+ | uov_method | UpscaleOrVaryMethod | Enum type, value should be one of 'Vary (Subtle)','Vary (Strong)','Upscale (1.5x)','Upscale (2x)','Upscale (Fast 2x)','Upscale (Custom)' |
159
+ | upscale_value | float | default to None , 1.0-5.0, magnification, only for uov_method is 'Upscale (Custom)' |
160
+ | input_image | str | input image, base64 str, or a URL |
161
+
162
+ **response params:**
163
+
164
+ This interface returns a universal response structure, refer to [response](#response)
165
+
166
+ **requests example:**
167
+
168
+ ```python
169
+ host = "http://127.0.0.1:8888"
170
+ image = open("./examples/imgs/bear.jpg", "rb").read()
171
+
172
+ def upscale_vary(image, params: dict) -> dict:
173
+ """
174
+ Upscale or Vary
175
+ """
176
+ params["input_image"] = base64.b64encode(image).decode('utf-8')
177
+ response = requests.post(url=f"{host}/v2/generation/image-upscale-vary",
178
+ data=json.dumps(params),
179
+ headers={"Content-Type": "application/json"},
180
+ timeout=300)
181
+ return response.json()
182
+
183
+ result =upscale_vary(image=image,
184
+ params={
185
+ "uov_method": "Upscale (2x)",
186
+ "async_process": True
187
+ })
188
+ print(json.dumps(result, indent=4, ensure_ascii=False))
189
+ ```
190
+
191
+ ## image-inpaint-outpaint
192
+
193
+ **base info:**
194
+
195
+ ```yaml
196
+ EndPoint_V1: /v1/generation/image-inpait-outpaint
197
+ EndPoint_V2: /v2/generation/image-inpait-outpaint
198
+ Method: Post
199
+ DataType: form|json
200
+ ```
201
+
202
+ ### V1
203
+
204
+ **requests params**
205
+
206
+ | Name | Type | Description |
207
+ | ---- | ---- |---------------------------------------------------------------------------------------------------------------------------|
208
+ | input_image | string($binary) | binary image |
209
+ | input_mask | string($binary) | binary image |
210
+ | inpaint_additional_prompt | string | additional_prompt |
211
+ | outpaint_selections | str | Image extension direction , 'Left', 'Right', 'Top', 'Bottom' seg with comma |
212
+ | outpaint_distance_left | int | Image extension distance, default to 0 |
213
+ | outpaint_distance_right | int | Image extension distance, default to 0 |
214
+ | outpaint_distance_top | int | Image extension distance, default to 0 |
215
+ | outpaint_distance_bottom | int | Image extension distance, default to 0 |
216
+ | style_selections | List[str] | list Fooocus style seg with comma |
217
+ | loras | str(List[Lora]) | lora list with configuration; Lora struct: [Lora](#lora), example: [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
218
+ | advanced_params | str(AdvancedParams) | advanced params, see [AdvancedParams](#advanceparams); sent as str, None is allowed |
219
+
220
+ **response params:**
221
+
222
+ This interface returns a universal response structure, refer to [response](#response)
223
+
224
+ **requests example:**
225
+
226
+ ```python
227
+ # example for inpaint outpaint v1
228
+ host = "http://127.0.0.1:8888"
229
+ image = open("./examples/imgs/bear.jpg", "rb").read()
230
+
231
+ def inpaint_outpaint(params: dict, input_image: bytes, input_mask: bytes = None) -> dict:
232
+ """
233
+ example for inpaint outpaint v1
234
+ """
235
+ response = requests.post(url=f"{host}/v1/generation/image-inpait-outpaint",
236
+ data=params,
237
+ files={"input_image": input_image,
238
+ "input_mask": input_mask})
239
+ return response.json()
240
+
241
+ # image extension example
242
+ result = inpaint_outpaint(params={
243
+ "outpaint_selections": "Left,Right",
244
+ "async_process": True},
245
+ input_image=image,
246
+ input_mask=None)
247
+ print(json.dumps(result, indent=4, ensure_ascii=False))
248
+
249
+ # image inpaint example
250
+ source = open("./examples/imgs/s.jpg", "rb").read()
251
+ mask = open("./examples/imgs/m.png", "rb").read()
252
+ result = inpaint_outpaint(params={
253
+ "prompt": "a cat",
254
+ "async_process": True},
255
+ input_image=source,
256
+ input_mask=mask)
257
+ print(json.dumps(result, indent=4, ensure_ascii=False))
258
+ ```
259
+
260
+ ### V2
261
+
262
+ **requests params**
263
+
264
+ | Name | Type | Description |
265
+ | ---- | ---- |---------------------------------------------------------------------------------|
266
+ | input_image | str | input image, base64 str, or a URL |
267
+ | input_mask | str | input mask, base64 str, or a URL |
268
+ | inpaint_additional_prompt | str | additional prompt |
269
+ | outpaint_selections | List[OutpaintExpansion] | OutpaintExpansion is an Enum; value should be one of "Left", "Right", "Top", "Bottom" |
270
+ | outpaint_distance_left | int | Image extension distance, default to 0 |
271
+ | outpaint_distance_right | int | Image extension distance, default to 0 |
272
+ | outpaint_distance_top | int | Image extension distance, default to 0 |
273
+ | outpaint_distance_bottom | int | Image extension distance, default to 0 |
274
+
275
+ **response params:**
276
+
277
+ This interface returns a universal response structure, refer to [response](#response)
278
+
279
+ **requests example:**
280
+
281
+ ```python
282
+ # example for inpaint outpaint v2
283
+ host = "http://127.0.0.1:8888"
284
+ image = open("./examples/imgs/bear.jpg", "rb").read()
285
+
286
+ def inpaint_outpaint(params: dict) -> dict:
287
+ """
288
+ example for inpaint outpaint v2
289
+ """
290
+ response = requests.post(url=f"{host}/v2/generation/image-inpait-outpaint",
291
+ data=json.dumps(params),
292
+ headers={"Content-Type": "application/json"})
293
+ return response.json()
294
+
295
+ # image extension example
296
+ result = inpaint_outpaint(params={
297
+ "input_image": base64.b64encode(image).decode('utf-8'),
298
+ "input_mask": None,
299
+ "outpaint_selections": ["Left", "Right"],
300
+ "async_process": True})
301
+ print(json.dumps(result, indent=4, ensure_ascii=False))
302
+
303
+ # image inpaint example
304
+ source = open("./examples/imgs/s.jpg", "rb").read()
305
+ mask = open("./examples/imgs/m.png", "rb").read()
306
+ result = inpaint_outpaint(params={
307
+ "prompt": "a cat",
308
+ "input_image": base64.b64encode(source).decode('utf-8'),
309
+ "input_mask": base64.b64encode(mask).decode('utf-8'),
310
+ "async_process": True})
311
+ print(json.dumps(result, indent=4, ensure_ascii=False))
312
+ ```
313
+
314
+ ## image-prompt
315
+
316
+ `v0.3.27` has a breaking change: this interface is now based on [image-inpaint-outpaint](#image-inpaint-outpaint)
317
+
318
+ Since v0.3.27, this interface implements the functions of both `inpaint_outpaint` and `image-prompt`.
319
+
320
+ > This is a multi-function interface, but it cannot perform the `inpaint_outpaint` and `image-prompt` functions in the same request
321
+
322
+ **base info:**
323
+
324
+ ```yaml
325
+ EndPoint_V1: /v1/generation/image-prompt
326
+ EndPoint_V2: /v2/generation/image-prompt
327
+ Method: Post
328
+ DataType: form|json
329
+ ```
330
+
331
+ ### V1
332
+
333
+ **requests params**
334
+
335
+ | Name | Type | Description |
336
+ | ---- | ---- |--------------------------------|
337
+ | input_image | Bytes | binary image, use for inpaint |
338
+ | input_mask | Bytes | binary image mask, use for inpaint |
339
+ | inpaint_additional_prompt | str | inpaint additional prompt |
340
+ | outpaint_selections | str | Image extension direction , 'Left', 'Right', 'Top', 'Bottom' seg with comma |
341
+ | outpaint_distance_left | int | Image extension distance, default to 0 |
342
+ | outpaint_distance_right | int | Image extension distance, default to 0 |
343
+ | outpaint_distance_top | int | Image extension distance, default to 0 |
344
+ | outpaint_distance_bottom | int | Image extension distance, default to 0 |
345
+ | cn_img1 | string($binary) | binary image |
346
+ | cn_stop1 | float | default to 0.6 |
347
+ | cn_weight1 | float | default to 0.6 |
348
+ | cn_type1 | Enum | should be one of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
349
+ | cn_img2 | string($binary) | binary image |
350
+ | cn_stop2 | float | default to 0.6 |
351
+ | cn_weight2 | float | default to 0.6 |
352
+ | cn_type2 | Enum | should be one of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
353
+ | cn_img3 | string($binary) | binary image |
354
+ | cn_stop3 | float | default to 0.6 |
355
+ | cn_weight3 | float | default to 0.6 |
356
+ | cn_type3 | Enum | should be one of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
357
+ | cn_img4 | string($binary) | binary image |
358
+ | cn_stop4 | float | default to 0.6 |
359
+ | cn_weight4 | float | default to 0.6 |
360
+ | cn_type4 | Enum | should be one of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
361
+ | style_selections | List[str] | list Fooocus style seg with comma |
362
+ | loras | str(List[Lora]) | lora list with configuration; Lora struct: [Lora](#lora), example: [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
363
+ | advanced_params | str(AdvancedParams) | advanced params, see [AdvancedParams](#advanceparams); sent as str, None is allowed |
364
+
365
+ **response params:**
366
+
367
+ This interface returns a universal response structure, refer to [response](#response)
368
+
369
+ **requests example:**
370
+
371
+ ```python
372
+ # image_prompt v1 example
373
+ host = "http://127.0.0.1:8888"
374
+ image = open("./examples/imgs/bear.jpg", "rb").read()
375
+ source = open("./examples/imgs/s.jpg", "rb").read()
376
+ mask = open("./examples/imgs/m.png", "rb").read()
377
+
378
+ def image_prompt(params: dict,
379
+ input_image: bytes=None,
380
+ input_mask: bytes=None,
381
+ cn_img1: bytes=None,
382
+ cn_img2: bytes=None,
383
+ cn_img3: bytes=None,
384
+ cn_img4: bytes=None,) -> dict:
385
+ """
386
+ image prompt
387
+ """
388
+ response = requests.post(url=f"{host}/v1/generation/image-prompt",
389
+ data=params,
390
+ files={
391
+ "input_image": input_iamge,
392
+ "input_mask": input_mask,
393
+ "cn_img1": cn_img1,
394
+ "cn_img2": cn_img2,
395
+ "cn_img3": cn_img3,
396
+ "cn_img4": cn_img4,
397
+ })
398
+ return response.json()
399
+
400
+ # image extend
401
+ params = {
402
+ "outpaint_selections": ["Left", "Right"],
403
+ "image_prompts": [] # required, can be empty list
404
+ }
405
+ result = image_prompt(params=params, input_image=image)
406
+ print(json.dumps(result, indent=4, ensure_ascii=False))
407
+
408
+ # inpaint
409
+
410
+ params = {
411
+ "prompt": "1girl sitting on the chair",
412
+ "image_prompts": [], # required, can be empty list
413
+ "async_process": True
414
+ }
415
+ result = image_prompt(params=params, input_image=source, input_mask=mask)
416
+ print(json.dumps(result, indent=4, ensure_ascii=False))
417
+
418
+ # image prompt
419
+
420
+ params = {
421
+ "prompt": "1girl sitting on the chair",
422
+ "image_prompts": [
423
+ {
424
+ "cn_stop": 0.6,
425
+ "cn_weight": 0.6,
426
+ "cn_type": "ImagePrompt"
427
+ },{
428
+ "cn_stop": 0.6,
429
+ "cn_weight": 0.6,
430
+ "cn_type": "ImagePrompt"
431
+ }]
432
+ }
433
+ result = image_prompt(params=params, cn_img1=image, cn_img2=source)
434
+ print(json.dumps(result, indent=4, ensure_ascii=False))
435
+ ```
436
+
437
+ ### V2
438
+
439
+ **requests params**
440
+
441
+ | Name | Type | Description |
442
+ | ---- | ---- |-------------------------------------------------|
443
+ | input_image | str | base64 image, or a URL, use for inpaint |
444
+ | input_mask | str | base64 image mask, or a URL, use for inpaint |
445
+ | inpaint_additional_prompt | str | inpaint additional prompt |
446
+ | outpaint_selections | List[OutpaintExpansion] | image extension direction; values from "Left", "Right", "Top", "Bottom" |
447
+ | outpaint_distance_left | int | Image extension distance, default to 0 |
448
+ | outpaint_distance_right | int | Image extension distance, default to 0 |
449
+ | outpaint_distance_top | int | Image extension distance, default to 0 |
450
+ | outpaint_distance_bottom | int | Image extension distance, default to 0 |
451
+ | image_prompts | List[ImagePrompt] | image prompt list with config; ImagePrompt struct below |
452
+
453
+ **ImagePrompt**
454
+
455
+ | Name | Type | Description |
456
+ | ---- | ---- |-------------------------------------------------------------------------------------|
457
+ | cn_img | str | input image, base64 str, or a URL |
458
+ | cn_stop | float | 0-1, default to 0.5 |
459
+ | cn_weight | float | weight, 0-2, default to 1.0 |
460
+ | cn_type | ControlNetType | ControlNetType Enum, should be one of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
461
+
462
+ **response params:**
463
+
464
+ This interface returns a universal response structure, refer to [response](#response)
465
+
466
+ **requests example:**
467
+
468
+ ```python
469
+ # image_prompt v2 example
470
+ host = "http://127.0.0.1:8888"
471
+ image = open("./examples/imgs/bear.jpg", "rb").read()
472
+ source = open("./examples/imgs/s.jpg", "rb").read()
473
+ mask = open("./examples/imgs/m.png", "rb").read()
474
+
475
+ def image_prompt(params: dict) -> dict:
476
+ """
477
+ image prompt
478
+ """
479
+ response = requests.post(url=f"{host}/v2/generation/image-prompt",
480
+ data=json.dumps(params),
481
+ headers={"Content-Type": "application/json"})
482
+ return response.json()
483
+
484
+ # image extend
485
+ params = {
486
+ "input_image": base64.b64encode(image).decode('utf-8'),
487
+ "outpaint_selections": ["Left", "Right"],
488
+ "image_prompts": [] # required, can be empty list
489
+ }
490
+ result = image_prompt(params)
491
+ print(json.dumps(result, indent=4, ensure_ascii=False))
492
+
493
+ # inpaint
494
+
495
+ params = {
496
+ "prompt": "1girl sitting on the chair",
497
+ "input_image": base64.b64encode(source).decode('utf-8'),
498
+ "input_mask": base64.b64encode(mask).decode('utf-8'),
499
+ "image_prompts": [], # required, can be empty list
500
+ "async_process": True
501
+ }
502
+ result = image_prompt(params)
503
+ print(json.dumps(result, indent=4, ensure_ascii=False))
504
+
505
+ # image prompt
506
+
507
+ params = {
508
+ "prompt": "1girl sitting on the chair",
509
+ "image_prompts": [
510
+ {
511
+ "cn_img": base64.b64encode(source).decode('utf-8'),
512
+ "cn_stop": 0.6,
513
+ "cn_weight": 0.6,
514
+ "cn_type": "ImagePrompt"
515
+ },{
516
+ "cn_img": base64.b64encode(image).decode('utf-8'),
517
+ "cn_stop": 0.6,
518
+ "cn_weight": 0.6,
519
+ "cn_type": "ImagePrompt"
520
+ }]
521
+ }
522
+ result = image_prompt(params)
523
+ print(json.dumps(result, indent=4, ensure_ascii=False))
524
+ ```
525
+
526
+ ## text-to-image-with-imageprompt
527
+
528
+ This interface only provides a V2 version
529
+
530
+ **base info:**
531
+
532
+ ```yaml
533
+ EndPoint: /v2/generation/text-to-image-with-ip
534
+ Method: Post
535
+ DataType: json
536
+ ```
537
+
538
+ **requests params**
539
+
540
+ | Name | Type | Description |
541
+ | ---- | ---- | ----------- |
542
+ | image_prompts | List[ImagePrompt] | Image list |
543
+
544
+ **requests example**:
545
+
546
+ ```python
547
+ # text to image with imageprompt example
548
+ host = "http://127.0.0.1:8888"
549
+ image = open("./examples/imgs/bear.jpg", "rb").read()
550
+ source = open("./examples/imgs/s.jpg", "rb").read()
551
+ def image_prompt(params: dict) -> dict:
552
+ """
553
+ image prompt
554
+ """
555
+ response = requests.post(url=f"{host}/v2/generation/text-to-image-with-ip",
556
+ data=json.dumps(params),
557
+ headers={"Content-Type": "application/json"})
558
+ return response.json()
559
+
560
+ params = {
561
+ "prompt": "A bear",
562
+ "image_prompts": [
563
+ {
564
+ "cn_img": base64.b64encode(source).decode('utf-8'),
565
+ "cn_stop": 0.6,
566
+ "cn_weight": 0.6,
567
+ "cn_type": "ImagePrompt"
568
+ },{
569
+ "cn_img": base64.b64encode(image).decode('utf-8'),
570
+ "cn_stop": 0.6,
571
+ "cn_weight": 0.6,
572
+ "cn_type": "ImagePrompt"
573
+ }
574
+ ]
575
+ }
576
+ result = image_prompt(params)
577
+ print(json.dumps(result, indent=4, ensure_ascii=False))
578
+ ```
579
+
580
+ ## describe
581
+
582
+ **base info:**
583
+
584
+ ```yaml
585
+ EndPoint: /v1/tools/describe-image
586
+ Method: Post
587
+ DataType: form
588
+ ```
589
+
590
+ **requests params**
591
+
592
+ | Name | Type | Description |
593
+ |------|------|------------------------------------------|
594
+ | type | Enum | type, should be one of "Photo", "Anime" |
595
+
596
+ **requests example**:
597
+
598
+ ```python
599
+ def describe_image(image: bytes,
600
+ params: dict = {"type": "Photo"}) -> dict:
601
+ """
602
+ describe-image
603
+ """
604
+ response = requests.post(url="http://127.0.0.1:8888/v1/tools/describe-image",
605
+ files={
606
+ "image": image
607
+ },
608
+ params=params,  # send the `type` field; assumed to be passed as a query parameter
+ timeout=30)
609
+ return response.json()
610
+ ```
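+
+ A usage sketch for the helper above (the file path is illustrative, reusing the example image from earlier sections):
+
+ ```python
+ image = open("./examples/imgs/bear.jpg", "rb").read()
+ prompt = describe_image(image)["describe"]
+ print(prompt)
+ ```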
611
+
612
+ **response example**:
613
+
614
+ ```python
615
+ {
616
+ "describe": "a young woman posing with her hands behind her head"
617
+ }
618
+ ```
619
+
620
+ --------------------------------------------
621
+
622
+ ## all-models
623
+
624
+ **base info:**
625
+
626
+ ```yaml
627
+ EndPoint: /v1/engines/all-models
628
+ Method: Get
629
+ ```
630
+
631
+ **requests example**:
632
+
633
+ ```python
634
+ def all_models() -> dict:
635
+ """
636
+ all-models
637
+ """
638
+ response = requests.get(url="http://127.0.0.1:8888/v1/engines/all-models",
639
+ timeout=30)
640
+ return response.json()
641
+ ```
642
+
643
+ **response params**:
644
+
645
+ ```python
646
+ {
647
+ "model_filenames": [
648
+ "juggernautXL_version6Rundiffusion.safetensors",
649
+ "sd_xl_base_1.0_0.9vae.safetensors",
650
+ "sd_xl_refiner_1.0_0.9vae.safetensors"
651
+ ],
652
+ "lora_filenames": [
653
+ "sd_xl_offset_example-lora_1.0.safetensors"
654
+ ]
655
+ }
656
+ ```
657
+
658
+ ## refresh-models
659
+
660
+ **base info:**
661
+
662
+ ```yaml
663
+ EndPoint: /v1/engines/refresh-models
664
+ Method: Post
665
+ ```
666
+
667
+ **requests example**
668
+ ```python
669
+ def refresh() -> dict:
670
+ """
671
+ refresh-models
672
+ """
673
+ response = requests.post(url="http://127.0.0.1:8888/v1/engines/refresh-models",
674
+ timeout=30)
675
+ return response.json()
676
+ ```
677
+
678
+ **response params**
679
+ ```python
680
+ {
681
+ "model_filenames": [
682
+ "juggernautXL_version6Rundiffusion.safetensors",
683
+ "sd_xl_base_1.0_0.9vae.safetensors",
684
+ "sd_xl_refiner_1.0_0.9vae.safetensors"
685
+ ],
686
+ "lora_filenames": [
687
+ "sd_xl_offset_example-lora_1.0.safetensors"
688
+ ]
689
+ }
690
+ ```
691
+
692
+ ## styles
693
+
694
+ **base info:**
695
+
696
+ ```yaml
697
+ EndPoint: /v1/engines/styles
698
+ Method: Get
699
+ ```
700
+
701
+ **requests example**:
702
+
703
+ ```python
704
+ def styles() -> dict:
705
+ """
706
+ styles
707
+ """
708
+ response = requests.get(url="http://127.0.0.1:8888/v1/engines/styles",
709
+ timeout=30)
710
+ return response.json()
711
+ ```
712
+
713
+ **response params**:
714
+
715
+ ```python
716
+ [
717
+ "Fooocus V2",
718
+ "Fooocus Enhance",
719
+ ...
720
+ "Watercolor 2",
721
+ "Whimsical And Playful"
722
+ ]
723
+ ```
724
+
725
+ # Fooocus API task related interfaces
726
+
727
+ ## job-queue
728
+
729
+ **base info:**
730
+
731
+ ```yaml
732
+ EndPoint: /v1/generation/job-queue
733
+ Method: Get
734
+ ```
735
+
736
+ **requests example**:
737
+
738
+ ```python
739
+ def job_queue() -> dict:
740
+ """
741
+ job-queue
742
+ """
743
+ response = requests.get(url="http://127.0.0.1:8888/v1/generation/job-queue",
744
+ timeout=30)
745
+ return response.json()
746
+ ```
747
+
748
+ **response params**:
749
+
750
+ ```python
751
+ {
752
+ "running_size": 0,
753
+ "finished_size": 1,
754
+ "last_job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4"
755
+ }
756
+ ```
757
+
758
+ ## query-job
759
+
760
+ **base info:**
761
+
762
+ ```yaml
763
+ EndPoint: /v1/generation/query-job
764
+ Method: Get
765
+ ```
766
+
767
+ **requests example**:
768
+ ```python
769
+ def taskResult(task_id: str) -> dict:
770
+ # get task status
771
+ task_status = requests.get(url="http://127.0.0.1:8888/v1/generation/query-job",
772
+ params={"job_id": task_id,
773
+ "require_step_preivew": False},
774
+ timeout=30)
775
+
776
+ return task_status.json()
777
+ ```
778
+
779
+ **response params**:
780
+ ```python
781
+ {
782
+ "job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4",
783
+ "job_type": "Text to Image",
784
+ "job_stage": "SUCCESS",
785
+ "job_progress": 100,
786
+ "job_status": "Finished",
787
+ "job_step_preview": null,
788
+ "job_result": [
789
+ {
790
+ "base64": null,
791
+ "url": "http://127.0.0.1:8888/files/2023-11-27/b928e50e-3c09-4187-a3f9-1c12280bfd95.png",
792
+ "seed": 8228839561385006000,
793
+ "finish_reason": "SUCCESS"
794
+ }
795
+ ]
796
+ }
797
+ ```
798
+
799
+ ## job-history
800
+
801
+ **base info:**
802
+
803
+ ```yaml
804
+ EndPoint: /v1/generation/job-history
805
+ Method: get
806
+ ```
807
+
808
+ **requests example**:
809
+
810
+ ```python
811
+ def job_history() -> dict:
812
+ """
813
+ job-history
814
+ """
815
+ response = requests.get(url="http://127.0.0.1:8888/v1/generation/job-history",
816
+ timeout=30)
817
+ return response.json()
818
+ ```
819
+
820
+ **response params**:
821
+
822
+ ```python
823
+ {
824
+ "queue": [],
825
+ "history": [
826
+ "job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4",
827
+ "is_finished": True
828
+ ]
829
+ }
830
+ ```
831
+
832
+ ## stop
833
+
834
+ **base info:**
835
+
836
+ ```yaml
837
+ EndPoint: /v1/generation/stop
838
+ Method: post
839
+ ```
840
+
841
+ **requests example**:
842
+
843
+ ```python
844
+ def stop() -> dict:
845
+ """
846
+ stop
847
+ """
848
+ response = requests.post(url="http://127.0.0.1:8888/v1/generation/stop",
849
+ timeout=30)
850
+ return response.json()
851
+ ```
852
+
853
+ **response params**:
854
+
855
+ ```python
856
+ {
857
+ "msg": "success"
858
+ }
859
+ ```
860
+
861
+ ## ping
862
+
863
+ **base info:**
864
+
865
+ ```yaml
866
+ EndPoint: /ping
867
+ Method: get
868
+ ```
869
+
870
+ pong
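+
+ A request example in the same style as the other interfaces; that the body is the plain text `pong` shown above is assumed:
+
+ ```python
+ def ping() -> str:
+     """
+     ping
+     """
+     response = requests.get(url="http://127.0.0.1:8888/ping",
+                             timeout=30)
+     return response.text
+
+ print(ping())
+ ```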
871
+
872
+ # webhook
873
+
874
+ You can specify an address with `--webhook-url` on the command line so that you receive notifications when asynchronous tasks complete
875
+
876
+ Here is a simple example to demonstrate how `webhook` works
877
+
878
+ First, start a simple server using the following code:
879
+
880
+ ```python
881
+ from fastapi import FastAPI
882
+ import uvicorn
883
+
884
+ app = FastAPI()
885
+
886
+ @app.post("/status")
887
+ async def status(payload: dict):  # `payload` instead of `requests`, to avoid shadowing the requests library
888
+ print(payload)
889
+
890
+ uvicorn.run(app, host="0.0.0.0", port=8000)
891
+ ```
892
+
893
+ Then, start Fooocus API with `--webhook-url http://host:8000/status`
894
+
895
+ Submit a task in any way; after it completes, you will see the task completion information in this simple server's log:
896
+
897
+ ```python
898
+ {'job_id': '717ec0b5-85df-4174-80d6-bddf93cd8248', 'job_result': [{'url': 'http://127.0.0.1:8888/files/2023-12-29/f1eca704-718e-4781-9d5f-82d41aa799d7.png', 'seed': '3283449865282320931'}]}
899
+ ```
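+
+ The [text-to-image](#text-to-image) table also lists a per-request `webhook_url` field, so a single task can be pointed at this server without the global flag (that it applies per task is stated in the change log; whether it overrides a globally configured URL is an assumption). A sketch reusing the `text2img` helper from above:
+
+ ```python
+ result = text2img({
+     "prompt": "1girl sitting on the ground",
+     "async_process": True,
+     "webhook_url": "http://127.0.0.1:8000/status"  # callback for this task only
+ })
+ ```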
900
+
901
+ # public requests params
902
+
903
+ ## AdvanceParams
904
+
905
+ | Name | Type | Description |
906
+ | ---- | ---- |----------------------------------------------------------------------------------|
907
+ | disable_preview | bool | disable preview, default to False |
908
+ | adm_scaler_positive | float | ADM Guidance Scaler, default to 1.5, range 0.1-3.0 |
909
+ | adm_scaler_negative | float | negative ADM Guidance Scaler, default to 0.8, range 0.1-3.0 |
910
+ | adm_scaler_end | float | ADM Guidance Scaler end value, default to 0.5, range 0.0-1.0 |
911
+ | refiner_swap_method | str | refiner model swap method, default to `joint` |
912
+ | adaptive_cfg | float | CFG Mimicking from TSNR, default to 7.0, range 1.0-30.0 |
913
+ | sampler_name | str | sampler, default to `default_sampler` |
914
+ | scheduler_name | str | scheduler, default to `default_scheduler` |
915
+ | overwrite_step | int | Forced Overwrite of Sampling Step, default to -1, range -1-200 |
916
+ | overwrite_switch | int | Forced Overwrite of Refiner Switch Step, default to -1, range -1-200 |
917
+ | overwrite_width | int | Forced Overwrite of Generating Width, default to -1, range -1-2048 |
918
+ | overwrite_height | int | Forced Overwrite of Generating Height, default to -1, range -1-2048 |
919
+ | overwrite_vary_strength | float | Forced Overwrite of Denoising Strength of "Vary", default to -1, range -1-1.0 |
920
+ | overwrite_upscale_strength | float | Forced Overwrite of Denoising Strength of "Upscale", default to -1, range -1-1.0 |
921
+ | mixing_image_prompt_and_vary_upscale | bool | Mixing Image Prompt and Vary/Upscale, default to False |
922
+ | mixing_image_prompt_and_inpaint | bool | Mixing Image Prompt and Inpaint, default to False |
923
+ | debugging_cn_preprocessor | bool | Debug Preprocessors, default to False |
924
+ | skipping_cn_preprocessor | bool | Skip Preprocessors, default to False |
925
+ | controlnet_softness | float | Softness of ControlNet, default to 0.25, range 0.0-1.0 |
926
+ | canny_low_threshold | int | Canny Low Threshold, default to 64, range 1-255 |
927
+ | canny_high_threshold | int | Canny High Threshold, default to 128, range 1-255 |
928
+ | freeu_enabled | bool | FreeU enabled, default to False |
929
+ | freeu_b1 | float | FreeU B1, default to 1.01 |
930
+ | freeu_b2 | float | FreeU B2, default to 1.02 |
931
+ | freeu_s1 | float | FreeU S1, default to 0.99 |
932
+ | freeu_s2 | float | FreeU S2, default to 0.95 |
933
+ | debugging_inpaint_preprocessor | bool | Debug Inpaint Preprocessing, default to False |
934
+ | inpaint_disable_initial_latent | bool | Disable initial latent in inpaint, default to False |
935
+ | inpaint_engine | str | Inpaint Engine, default to `v1` |
936
+ | inpaint_strength | float | Inpaint Denoising Strength, default to 1.0, range 0.0-1.0 |
937
+ | inpaint_respective_field | float | Inpaint Respective Field, default to 1.0, range 0.0-1.0 |
938
+
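+ For example, overriding a few of these fields in a v2 JSON request; unlisted fields keep the defaults from the table above. A sketch reusing the `text2img` helper from [text-to-image](#text-to-image):
+
+ ```python
+ result = text2img({
+     "prompt": "a cat",
+     "advanced_params": {
+         "adm_scaler_positive": 1.5,   # within the 0.1-3.0 range above
+         "overwrite_step": 30,         # force 30 sampling steps
+         "canny_low_threshold": 64
+     },
+     "async_process": True
+ })
+ ```
+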
939
+ ## lora
940
+
941
+ | Name | Type | Description |
942
+ | ---- | ---- |------------------------|
943
+ | model_name | str | model name |
944
+ | weight | float | weight, default to 0.5 |
945
+
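+ For example, the same loras list as a v2 JSON value and serialized to a string for the v1 form interfaces (a sketch; the model file must exist in the loras directory):
+
+ ```python
+ loras = [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors",
+           "weight": 0.5}]
+
+ json_body = {"prompt": "a cat", "loras": loras}              # v2 json interfaces
+ form_body = {"prompt": "a cat", "loras": json.dumps(loras)}  # v1 form interfaces take a str
+ ```
+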
946
+ ## response
947
+
948
+ success response:
949
+
950
+ **async_process: True**
951
+
952
+ | Name | Type | Description |
953
+ | ---- | ---- |--------------|
954
+ | job_id | str | job ID (UUID string) |
955
+ | job_type | str | job type |
956
+ | job_stage | str | job stage |
957
+ | job_progress | float | job progress |
958
+ | job_status | str | job status |
959
+ | job_step_preview | str | job preview |
960
+ | job_result | str | job result |
961
+
962
+ **async_process: False**
963
+
964
+ | Name | Type | Description |
965
+ | ---- | ---- |----------------------------------------------------------------------------------|
966
+ | base64 | str | base64-encoded image; whether it is null depends on the `require_base64` param |
967
+ | url | str | result image url |
968
+ | seed | str | image seed, returned as a string to avoid numerical overflow |
969
+ | finish_reason | str | finish reason |
970
+
971
+ fail response:
Fooocus-API/docs/api_doc_zh.md ADDED
@@ -0,0 +1,973 @@
1
+ - [简介](#简介)
2
+ - [Fooocus 能力相关接口](#fooocus-能力相关接口)
3
+ - [文生图 | text-to-image](#文生图--text-to-image)
4
+ - [图像放大 | image-upscale-vary](#图像放大--image-upscale-vary)
5
+ - [局部重绘 | image-inpaint-outpaint](#局部重绘--image-inpaint-outpaint)
6
+ - [图生图 | image-prompt](#图生图--image-prompt)
7
+ - [text-to-image-with-imageprompt](#text-to-image-with-imageprompt)
8
+ - [图像反推 | describe](#图像反推--describe)
9
+ - [列出模型 | all-models](#列出模型--all-models)
10
+ - [刷新模型 | refresh-models](#刷新模型--refresh-models)
11
+ - [样式 | styles](#样式--styles)
12
+ - [Fooocus API 任务相关接口](#fooocus-api-任务相关接口)
13
+ - [任务队列 | job-queue](#任务队列--job-queue)
14
+ - [查询任务 | query-job](#查询任务--query-job)
15
+ - [查询任务历史 | job-history](#查询任务历史--job-history)
16
+ - [停止任务 | stop](#停止任务--stop)
17
+ - [ping](#ping)
18
+ - [webhook](#webhook)
19
+ - [公共请求体](#公共请求体)
20
+ - [高级参数 | AdvanceParams](#高级参数--advanceparams)
21
+ - [lora](#lora)
22
+ - [响应参数 | response](#响应参数--response)
23
+
24
+
25
+
26
+ # 简介
27
+
28
+ Fooocus API 目前提供了十多个 REST 接口, 我大致将其分为两类, 第一类用来调用 Fooocus 的能力, 比如生成图像、刷新模型之类的, 第二类为 Fooocus API 自身相关的, 主要是任务查询相关。我会在接下来的内容中尝试说明它们的作用以及用法并提供示例。
29
+
30
+ > 几乎所有的接口参数都有默认值,这意味着你只需要发送你感兴趣的参数即可。完整的参数以及默认值可以通过表格查看
31
+
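+ 文中的 Python 示例只是演示用的草稿, 并非完整的客户端; 这些示例假定服务运行在默认地址, 且已导入以下模块(原示例省略了导入, 此处为补充):
+
+ ```python
+ import base64
+ import json
+
+ import requests
+ ```
+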
32
+ # Fooocus 能力相关接口
33
+
34
+ ## 文生图 | text-to-image
35
+
36
+ 对应 Fooocus 中的文生图功能
37
+
38
+ **基础信息:**
39
+
40
+ ```yaml
41
+ EndPoint: /v1/generation/text-to-image
42
+ Method: Post
43
+ DataType: json
44
+ ```
45
+ **请求参数:**
46
+
47
+ | Name | Type | Description |
48
+ | ---- | ---- | ----------- |
49
+ | prompt | string | 描述词, 默认为空字符串 |
50
+ | negative_prompt | string | 反向描述词 |
51
+ | style_selections | List[str] | 风格列表, 需要是受支持的风格, 可以通过 [样式接口](#样式--styles) 获取所有支持的样式 |
52
+ | performance_selection | Enum | 性能选择, `Speed`, `Quality`, `Extreme Speed` 中的一个, 默认 `Speed`|
53
+ | aspect_ratios_selection | str | 分辨率, 默认 '1152*896' |
54
+ | image_number | int | 生成图片数量, 默认 1 , 最大32, 注: 非并行接口 |
55
+ | image_seed | int | 图片种子, 默认 -1, 即随机生成 |
56
+ | sharpness | float | 锐度, 默认 2.0 , 0-30 |
57
+ | guidance_scale | float | 引导比例, 默认 4.0 , 1-30 |
58
+ | base_model_name | str | 基础模型, 默认 `juggernautXL_version6Rundiffusion.safetensors` |
59
+ | refiner_model_name | str | 优化模型, 默认 `None` |
60
+ | refiner_switch | float | 优化模型切换时机, 默认 0.5 |
61
+ | loras | List[Lora] | lora 模型列表, 包含配置, lora 结构: [Lora](#lora) |
62
+ | advanced_params | AdvancedParams | 高级参数, AdvancedParams 结构 [AdvancedParams](#高级参数--advanceparams) |
63
+ | require_base64 | bool | 是否返回base64编码, 默认 False |
64
+ | async_process | bool | 是否异步处理, 默认 False |
65
+ | webhook_url | str | 异步处理完成后, 触发的 webhook 地址, 参考[webhook](#webhook) |
66
+
67
+ **响应参数:**
68
+
69
+ 多数响应结构是相同的, 不同的部分会进行特别说明。
70
+
71
+ 该接口返回通用响应结构, 参考[响应参数](#响应参数--response)
72
+
73
+ **请求示例:**
74
+
75
+ ```python
76
+ host = "http://127.0.0.1:8888"
77
+
78
+ def text2img(params: dict) -> dict:
79
+ """
80
+ 文生图
81
+ """
82
+ result = requests.post(url=f"{host}/v1/generation/text-to-image",
83
+ data=json.dumps(params),
84
+ headers={"Content-Type": "application/json"})
85
+ return result.json()
86
+
87
+ result =text2img({
88
+ "prompt": "1girl sitting on the ground",
89
+ "async_process": True})
90
+ print(result)
91
+ ```
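+
+ 上面的调用设置了 `async_process` 为 `True`, 因此响应中只有排队任务的信息而非图像。下面是一个基于后文[查询任务](#查询任务--query-job)接口的轮询草稿(其中 "SUCCESS" 来自该接口的示例, "ERROR" 为假设的失败状态):
+
+ ```python
+ import time
+
+ def poll_job(job_id: str, interval: float = 2.0) -> dict:
+     """
+     轮询 query-job 直到异步任务结束
+     """
+     while True:
+         job = requests.get(url=f"{host}/v1/generation/query-job",
+                            params={"job_id": job_id},
+                            timeout=30).json()
+         if job["job_stage"] in ("SUCCESS", "ERROR"):  # "ERROR" 为假设值
+             return job
+         time.sleep(interval)
+
+ job = poll_job(result["job_id"])
+ print(json.dumps(job["job_result"], indent=4, ensure_ascii=False))
+ ```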
92
+
93
+ ## 图像放大 | image-upscale-vary
94
+
95
+ 该接口对应 Fooocus 中的 Upscale or Variation 功能
96
+
97
+ 该接口参数继承自[文生图](#文生图--text-to-image), 因此后面只会列出和[文生图](#文生图--text-to-image)请求参数差异部分
98
+
99
+ 此外, 该接口提供了两个版本, 两个版本并无功能上的差异, 主要是请求方式略有区别
100
+
101
+ **基础信息:**
102
+
103
+ ```yaml
104
+ EndPoint_V1: /v1/generation/image-upscale-vary
105
+ EndPoint_V2: /v2/generation/image-upscale-vary
106
+ Method: Post
107
+ DataType: form|json
108
+ ```
109
+
110
+ ### V1
111
+
112
+ **请求参数**
113
+
114
+ | Name | Type | Description |
115
+ | ---- | ---- | ----------- |
116
+ | input_image | string($binary) | 二进制 str 图像 |
117
+ | uov_method | Enum | 'Vary (Subtle)','Vary (Strong)','Upscale (1.5x)','Upscale (2x)','Upscale (Fast 2x)','Upscale (Custom)' |
118
+ | upscale_value | float | 默认为 None , 1.0-5.0, 放大倍数, 仅在 'Upscale (Custom)' 中有效 |
119
+ | style_selections | List[str] | 以逗号分割的 Fooocus 风格列表 |
120
+ | loras | str(List[Lora]) | lora 模型列表, 包含配置, lora 结构: [Lora](#lora), 比如: [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
121
+ | advanced_params | str(AdvancedParams) | 高级参数, AdvancedParams 结构 [AdvancedParams](#高级参数--advanceparams), 以字符串形式发送, 可以为空 |
122
+
123
+ **响应参数:**
124
+
125
+ 该接口返回通用响应结构, 参考[响应参数](#响应参数--response)
126
+
127
+ **请求示例:**
128
+
129
+ ```python
130
+ # 不要加 {"Content-Type": "application/json"} 这个 header
131
+
132
+ host = "http://127.0.0.1:8888"
133
+ image = open("./examples/imgs/bear.jpg", "rb").read()
134
+
135
+ def upscale_vary(image, params: dict) -> dict:
136
+ """
137
+ Upscale or Vary
138
+ """
139
+ response = requests.post(url=f"{host}/v1/generation/image-upscale-vary",
140
+ data=params,
141
+ files={"input_image": image})
142
+ return response.json()
143
+
144
+ result =upscale_vary(image=image,
145
+ params={
146
+ "uov_method": "Upscale (2x)",
147
+ "async_process": True
148
+ })
149
+ print(json.dumps(result, indent=4, ensure_ascii=False))
150
+ ```
151
+
152
+ ### V2
153
+
154
+ **请求参数**
155
+
156
+ | Name | Type | Description |
157
+ | ---- | ---- | ----------- |
158
+ | uov_method | UpscaleOrVaryMethod | 是个枚举类型, 包括 'Vary (Subtle)','Vary (Strong)','Upscale (1.5x)','Upscale (2x)','Upscale (Fast 2x)','Upscale (Custom)' |
159
+ | upscale_value | float | 默认为 None , 1.0-5.0, 放大倍数, 仅在 'Upscale (Custom)' 中有效 |
160
+ | input_image | str | 输入图像, base64 格式, 或者一个URL |
161
+
162
+ **响应参数:**
163
+
164
+ 该接口返回通用响应结构, 参考[响应参数](#响应参数--response)
165
+
166
+ **请求示例:**
167
+
168
+ ```python
169
+ host = "http://127.0.0.1:8888"
170
+ image = open("./examples/imgs/bear.jpg", "rb").read()
171
+
172
+ def upscale_vary(image, params: dict) -> dict:
173
+ """
174
+ Upscale or Vary
175
+ """
176
+ params["input_image"] = base64.b64encode(image).decode('utf-8')
177
+ response = requests.post(url=f"{host}/v2/generation/image-upscale-vary",
178
+ data=json.dumps(params),
179
+ headers={"Content-Type": "application/json"},
180
+ timeout=300)
181
+ return response.json()
182
+
183
+ result =upscale_vary(image=image,
184
+ params={
185
+ "uov_method": "Upscale (2x)",
186
+ "async_process": True
187
+ })
188
+ print(json.dumps(result, indent=4, ensure_ascii=False))
189
+ ```
190
+
191
+ ## 局部重绘 | image-inpaint-outpaint
192
+
193
+ **基础信息:**
194
+
195
+ ```yaml
196
+ EndPoint_V1: /v1/generation/image-inpait-outpaint
197
+ EndPoint_V2: /v2/generation/image-inpait-outpaint
198
+ Method: Post
199
+ DataType: form|json
200
+ ```
201
+
202
+ ### V1
203
+
204
+ **请求参数**
205
+
206
+ | Name | Type | Description |
207
+ | ---- | ---- | ----------- |
208
+ | input_image | string($binary) | 二进制 str 图像 |
209
+ | input_mask | string($binary) | 二进制 str 图像 |
210
+ | inpaint_additional_prompt | string | 附加描述 |
211
+ | outpaint_selections | str | 图像扩展方向, 逗号分割的 'Left', 'Right', 'Top', 'Bottom' |
212
+ | outpaint_distance_left | int | 图像扩展距离, 默认 0 |
213
+ | outpaint_distance_right | int | 图像扩展距离, 默认 0 |
214
+ | outpaint_distance_top | int | 图像扩展距离, 默认 0 |
215
+ | outpaint_distance_bottom | int | 图像扩展距离, 默认 0 |
216
+ | style_selections | List[str] | 以逗号分割的 Fooocus 风格列表 |
217
+ | loras | str(List[Lora]) | lora 模型列表, 包含配置, lora 结构: [Lora](#lora), 比如: [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
218
+ | advanced_params | str(AdvancedParams) | 高级参数, AdvancedParams 结构 [AdvancedParams](#高级参数--advanceparams), 以字符串形式发送 |
219
+
220
+ **响应参数:**
221
+
222
+ 该接口返回通用响应结构, 参考[响应参数](#响应参数--response)
223
+
224
+ **请求示例:**
225
+
226
+ ```python
227
+ # 局部重绘 v1 接口示例
228
+ host = "http://127.0.0.1:8888"
229
+ image = open("./examples/imgs/bear.jpg", "rb").read()
230
+
231
+ def inpaint_outpaint(params: dict, input_image: bytes, input_mask: bytes = None) -> dict:
232
+ """
233
+ 局部重绘 v1 接口示例
234
+ """
235
+ response = requests.post(url=f"{host}/v1/generation/image-inpait-outpaint",
236
+ data=params,
237
+ files={"input_image": input_image,
238
+ "input_mask": input_mask})
239
+ return response.json()
240
+
241
+ # 图片扩展示例
242
+ result = inpaint_outpaint(params={
243
+ "outpaint_selections": "Left,Right",
244
+ "async_process": True},
245
+ input_image=image,
246
+ input_mask=None)
247
+ print(json.dumps(result, indent=4, ensure_ascii=False))
248
+
249
+ # 局部重绘示例
250
+ source = open("./examples/imgs/s.jpg", "rb").read()
251
+ mask = open("./examples/imgs/m.png", "rb").read()
252
+ result = inpaint_outpaint(params={
253
+ "prompt": "a cat",
254
+ "async_process": True},
255
+ input_image=source,
256
+ input_mask=mask)
257
+ print(json.dumps(result, indent=4, ensure_ascii=False))
258
+ ```
259
+
260
+ ### V2
261
+
262
+ **请求参数**
263
+
264
+ | Name | Type | Description |
265
+ | ---- | ---- |-----------------------------------------------------------------|
266
+ | input_image | str | 输入图像, base64 格式, 或者一个URL |
267
+ | input_mask | str | 输入遮罩, base64 格式, 或者一个URL |
268
+ | inpaint_additional_prompt | str | 附加描述词 |
269
+ | outpaint_selections | List[OutpaintExpansion] | OutpaintExpansion 是一个枚举类型, 值包括 "Left", "Right", "Top", "Bottom" |
270
+ | outpaint_distance_left | int | 图像扩展距离, 默认 0 |
271
+ | outpaint_distance_right | int | 图像扩展距离, 默认 0 |
272
+ | outpaint_distance_top | int | 图像扩展距离, 默认 0 |
273
+ | outpaint_distance_bottom | int | 图像扩展距离, 默认 0 |
274
+
275
+ **响应参数:**
276
+
277
+ 该接口返回通用响应结构, 参考[响应参数](#响应参数--response)
278
+
279
+ **请求示例:**
280
+
281
+ ```python
282
+ # 局部重绘 v2 接口示例
283
+ host = "http://127.0.0.1:8888"
284
+ image = open("./examples/imgs/bear.jpg", "rb").read()
285
+
286
+ def inpaint_outpaint(params: dict) -> dict:
287
+ """
288
+ 局部重绘 v2 接口示例
289
+ """
290
+ response = requests.post(url=f"{host}/v2/generation/image-inpait-outpaint",
291
+ data=json.dumps(params),
292
+ headers={"Content-Type": "application/json"})
293
+ return response.json()
294
+
295
+ # 图像扩展示例
296
+ result = inpaint_outpaint(params={
297
+ "input_image": base64.b64encode(image).decode('utf-8'),
298
+ "input_mask": None,
299
+ "outpaint_selections": ["Left", "Right"],
300
+ "async_process": True})
301
+ print(json.dumps(result, indent=4, ensure_ascii=False))
302
+
303
+ # inpaint example
304
+ source = open("./examples/imgs/s.jpg", "rb").read()
305
+ mask = open("./examples/imgs/m.png", "rb").read()
306
+ result = inpaint_outpaint(params={
307
+ "prompt": "a cat",
308
+ "input_image": base64.b64encode(source).decode('utf-8'),
309
+ "input_mask": base64.b64encode(mask).decode('utf-8'),
310
+ "async_process": True})
311
+ print(json.dumps(result, indent=4, ensure_ascii=False))
312
+ ```
313
+
314
+ ## Image prompt | image-prompt
315
+
316
+ This endpoint received a major update in `v0.3.27`: it now inherits from [Inpaint/Outpaint](#局部重绘--image-inpaint-outpaint) instead of [Text to Image](#文生图--text-to-image)
317
+
318
+ Since that version, this endpoint can provide the functionality of both the `inpaint_outpaint` and `image-prompt` endpoints
319
+
320
+ > It is a multi-purpose endpoint, but it cannot perform the `inpaint_outpaint` and `image-prompt` functions in the same request
321
+
322
+ **Basic info:**
323
+
324
+ ```yaml
325
+ EndPoint_V1: /v1/generation/image-prompt
326
+ EndPoint_V2: /v2/generation/image-prompt
327
+ Method: Post
328
+ DataType: form|json
329
+ ```
330
+
331
+ ### V1
332
+
333
+ **Request parameters**
334
+
335
+ > Note: although this endpoint now inherits from [Inpaint/Outpaint](#局部重绘--image-inpaint-outpaint), the table below still shows the parameters inherited from [Text to Image](#文生图--text-to-image); the parameter list is complete
336
+
337
+ | Name | Type | Description |
338
+ | ---- | ---- | ----------- |
339
+ | input_image | Bytes | Binary image, used for inpainting |
340
+ | input_mask | Bytes | Binary mask image, used for inpainting |
341
+ | inpaint_additional_prompt | str | Additional inpaint prompt |
342
+ | outpaint_selections | str | Outpaint directions, comma-separated: "Left", "Right", "Top", "Bottom" |
343
+ | outpaint_distance_left | int | Outpaint distance, default 0 |
344
+ | outpaint_distance_right | int | Outpaint distance, default 0 |
345
+ | outpaint_distance_top | int | Outpaint distance, default 0 |
346
+ | outpaint_distance_bottom | int | Outpaint distance, default 0 |
347
+ | cn_img1 | string($binary) | Binary image data |
348
+ | cn_stop1 | float | Default 0.6 |
349
+ | cn_weight1 | float | Default 0.6 |
350
+ | cn_type1 | Enum | One of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
351
+ | cn_img2 | string($binary) | Binary image data |
352
+ | cn_stop2 | float | Default 0.6 |
353
+ | cn_weight2 | float | Default 0.6 |
354
+ | cn_type2 | Enum | One of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
355
+ | cn_img3 | string($binary) | Binary image data |
356
+ | cn_stop3 | float | Default 0.6 |
357
+ | cn_weight3 | float | Default 0.6 |
358
+ | cn_type3 | Enum | One of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
359
+ | cn_img4 | string($binary) | Binary image data |
360
+ | cn_stop4 | float | Default 0.6 |
361
+ | cn_weight4 | float | Default 0.6 |
362
+ | cn_type4 | Enum | One of "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
363
+ | style_selections | List[str] | Comma-separated list of Fooocus styles |
364
+ | loras | str(List[Lora]) | List of LoRA models with their settings; Lora structure: [Lora](#lora), e.g. [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}] |
365
+ | advanced_params | str(AdvancedParams) | Advanced parameters; AdvancedParams structure: [AdvancedParams](#advanced-parameters--advancedparams), sent as a JSON string |
366
+
367
+ **Response:**
368
+
369
+ This endpoint returns the common response structure; see [Response](#response--response)
370
+
371
+ **Request example:**
372
+
373
+ ```python
374
+ # image_prompt v1 endpoint example
375
+ host = "http://127.0.0.1:8888"
376
+ image = open("./examples/imgs/bear.jpg", "rb").read()
377
+ source = open("./examples/imgs/s.jpg", "rb").read()
378
+ mask = open("./examples/imgs/m.png", "rb").read()
379
+
380
+ def image_prompt(params: dict,
381
+ input_image: bytes=None,
382
+ input_mask: bytes=None,
383
+ cn_img1: bytes=None,
384
+ cn_img2: bytes=None,
385
+ cn_img3: bytes=None,
386
+ cn_img4: bytes=None,) -> dict:
387
+ """
388
+ image prompt
389
+ """
390
+ response = requests.post(url=f"{host}/v1/generation/image-prompt",
391
+ data=params,
392
+ files={
393
+ "input_image": input_iamge,
394
+ "input_mask": input_mask,
395
+ "cn_img1": cn_img1,
396
+ "cn_img2": cn_img2,
397
+ "cn_img3": cn_img3,
398
+ "cn_img4": cn_img4,
399
+ })
400
+ return response.json()
401
+
402
+ # outpaint
403
+ params = {
404
+ "outpaint_selections": ["Left", "Right"],
405
+ "image_prompts": [] # 必传参数,可以为空列表
406
+ }
407
+ result = image_prompt(params=params, input_image=image)
408
+ print(json.dumps(result, indent=4, ensure_ascii=False))
409
+
410
+ # inpaint
411
+
412
+ params = {
413
+ "prompt": "1girl sitting on the chair",
414
+ "image_prompts": [], # 必传参数,可以为空列表
415
+ "async_process": True
416
+ }
417
+ result = image_prompt(params=params, input_image=source, input_mask=mask)
418
+ print(json.dumps(result, indent=4, ensure_ascii=False))
419
+
420
+ # image prompt
421
+
422
+ params = {
423
+ "prompt": "1girl sitting on the chair",
424
+ "image_prompts": [
425
+ {
426
+ "cn_stop": 0.6,
427
+ "cn_weight": 0.6,
428
+ "cn_type": "ImagePrompt"
429
+ },{
430
+ "cn_stop": 0.6,
431
+ "cn_weight": 0.6,
432
+ "cn_type": "ImagePrompt"
433
+ }]
434
+ }
435
+ result = image_prompt(params=params, cn_img1=image, cn_img2=source)
436
+ print(json.dumps(result, indent=4, ensure_ascii=False))
437
+ ```
438
+
439
+ ### V2
440
+
441
+ **Request parameters**
442
+
443
+ | Name | Type | Description |
444
+ | ---- | ---- | ----------- |
445
+ | input_image | str | Base64-encoded image, or a URL, used for inpainting |
446
+ | input_mask | str | Base64-encoded mask image, or a URL, used for inpainting |
447
+ | inpaint_additional_prompt | str | Additional inpaint prompt |
448
+ | outpaint_selections | List[OutpaintExpansion] | Outpaint directions, a list of "Left", "Right", "Top", "Bottom" |
449
+ | outpaint_distance_left | int | Outpaint distance, default 0 |
450
+ | outpaint_distance_right | int | Outpaint distance, default 0 |
451
+ | outpaint_distance_top | int | Outpaint distance, default 0 |
452
+ | outpaint_distance_bottom | int | Outpaint distance, default 0 |
453
+ | image_prompts | List[ImagePrompt] | List of image prompts with their settings; the ImagePrompt structure is shown below |
454
+
455
+ **ImagePrompt**
456
+
457
+ | Name | Type | Description |
458
+ | ---- | ---- | ----------- |
459
+ | cn_img | str | Input image, base64 encoded, or a URL |
459
+ | cn_stop | float | Stop at, range 0-1, default 0.5 |
460
+ | cn_weight | float | Weight, range 0-2, default 1.0 |
461
+ | cn_type | ControlNetType | ControlNet type, an enum: "ImagePrompt", "FaceSwap", "PyraCanny", "CPDS" |
463
+
464
+ **Response:**
465
+
466
+ This endpoint returns the common response structure; see [Response](#response--response)
467
+
468
+ **Request example:**
469
+
470
+ ```python
471
+ # image_prompt v2 endpoint example
472
+ host = "http://127.0.0.1:8888"
473
+ image = open("./examples/imgs/bear.jpg", "rb").read()
474
+ source = open("./examples/imgs/s.jpg", "rb").read()
475
+ mask = open("./examples/imgs/m.png", "rb").read()
476
+
477
+ def image_prompt(params: dict) -> dict:
478
+ """
479
+ image prompt
480
+ """
481
+ response = requests.post(url=f"{host}/v2/generation/image-prompt",
482
+ data=json.dumps(params),
483
+ headers={"Content-Type": "application/json"})
484
+ return response.json()
485
+
486
+ # outpaint
487
+ params = {
488
+ "input_image": base64.b64encode(image).decode('utf-8'),
489
+ "outpaint_selections": ["Left", "Right"],
490
+ "image_prompts": [] # 必传参数,可以为空列表
491
+ }
492
+ result = image_prompt(params)
493
+ print(json.dumps(result, indent=4, ensure_ascii=False))
494
+
495
+ # inpaint
496
+
497
+ params = {
498
+ "prompt": "1girl sitting on the chair",
499
+ "input_image": base64.b64encode(source).decode('utf-8'),
500
+ "input_mask": base64.b64encode(mask).decode('utf-8'),
501
+ "image_prompts": [], # 必传参数,可以为空列表
502
+ "async_process": True
503
+ }
504
+ result = image_prompt(params)
505
+ print(json.dumps(result, indent=4, ensure_ascii=False))
506
+
507
+ # image prompt
508
+
509
+ params = {
510
+ "prompt": "1girl sitting on the chair",
511
+ "image_prompts": [
512
+ {
513
+ "cn_img": base64.b64encode(source).decode('utf-8'),
514
+ "cn_stop": 0.6,
515
+ "cn_weight": 0.6,
516
+ "cn_type": "ImagePrompt"
517
+ },{
518
+ "cn_img": base64.b64encode(image).decode('utf-8'),
519
+ "cn_stop": 0.6,
520
+ "cn_weight": 0.6,
521
+ "cn_type": "ImagePrompt"
522
+ }]
523
+ }
524
+ result = image_prompt(params)
525
+ print(json.dumps(result, indent=4, ensure_ascii=False))
526
+ ```
527
+
528
+ ## Text to image with ImagePrompt
529
+
530
+ This endpoint currently has no v1 version
531
+
532
+ **Basic info:**
533
+
534
+ ```yaml
535
+ EndPoint: /v2/generation/text-to-image-with-ip
536
+ Method: Post
537
+ DataType: json
538
+ ```
539
+
540
+ **Request parameters**
541
+
542
+ | Name | Type | Description |
543
+ | ---- | ---- | ----------- |
544
+ | image_prompts | List[ImagePrompt] | List of image prompts |
545
+
546
+ **Request example**:
547
+
548
+ ```python
549
+ # text-to-image-with-ip example
550
+ host = "http://127.0.0.1:8888"
551
+ image = open("./examples/imgs/bear.jpg", "rb").read()
552
+ source = open("./examples/imgs/s.jpg", "rb").read()
553
+ def image_prompt(params: dict) -> dict:
554
+ """
555
+ image prompt
556
+ """
557
+ response = requests.post(url=f"{host}/v2/generation/text-to-image-with-ip",
558
+ data=json.dumps(params),
559
+ headers={"Content-Type": "application/json"})
560
+ return response.json()
561
+
562
+ params = {
563
+ "prompt": "A bear",
564
+ "image_prompts": [
565
+ {
566
+ "cn_img": base64.b64encode(source).decode('utf-8'),
567
+ "cn_stop": 0.6,
568
+ "cn_weight": 0.6,
569
+ "cn_type": "ImagePrompt"
570
+ },{
571
+ "cn_img": base64.b64encode(image).decode('utf-8'),
572
+ "cn_stop": 0.6,
573
+ "cn_weight": 0.6,
574
+ "cn_type": "ImagePrompt"
575
+ }
576
+ ]
577
+ }
578
+ result = image_prompt(params)
579
+ print(json.dumps(result, indent=4, ensure_ascii=False))
580
+ ```
581
+
582
+ ## Describe image | describe
583
+
584
+ **Basic info:**
585
+
586
+ ```yaml
587
+ EndPoint: /v1/tools/describe-image
588
+ Method: Post
589
+ DataType: form
590
+ ```
591
+
592
+ **Request parameters**
593
+
594
+ | Name | Type | Description |
595
+ |------|------|-----------------------------|
596
+ | type | Enum | Describe type, one of "Photo", "Anime" |
597
+
598
+ **Request example**:
599
+
600
+ ```python
601
+ def describe_image(image: bytes,
602
+ params: dict = {"type": "Photo"}) -> dict:
603
+ """
604
+ describe-image
605
+ """
606
+ response = requests.post(url="http://127.0.0.1:8888/v1/tools/describe-image",
607
+ files={
608
+ "image": image
609
+ },
610
+ timeout=30)
611
+ return response.json()
612
+ ```
613
+
614
+ **Response example**:
615
+
616
+ ```python
617
+ {
618
+ "describe": "a young woman posing with her hands behind her head"
619
+ }
620
+ ```
621
+
622
+ --------------------------------------------
623
+
624
+ ## List models | all-models
625
+
626
+ **Basic info:**
627
+
628
+ ```yaml
629
+ EndPoint: /v1/engines/all-models
630
+ Method: Get
631
+ ```
632
+
633
+ **Request example**:
634
+
635
+ ```python
636
+ def all_models() -> dict:
637
+ """
638
+ all-models
639
+ """
640
+ response = requests.get(url="http://127.0.0.1:8888/v1/engines/all-models",
641
+ timeout=30)
642
+ return response.json()
643
+ ```
644
+
645
+ **响应示例**:
646
+
647
+ ```python
648
+ {
649
+ "model_filenames": [
650
+ "juggernautXL_version6Rundiffusion.safetensors",
651
+ "sd_xl_base_1.0_0.9vae.safetensors",
652
+ "sd_xl_refiner_1.0_0.9vae.safetensors"
653
+ ],
654
+ "lora_filenames": [
655
+ "sd_xl_offset_example-lora_1.0.safetensors"
656
+ ]
657
+ }
658
+ ```
659
+
660
+ ## Refresh models | refresh-models
661
+
662
+ **Basic info:**
663
+
664
+ ```yaml
665
+ EndPoint: /v1/engines/refresh-models
666
+ Method: Post
667
+ ```
668
+
669
+ **Request example**
670
+ ```python
671
+ def refresh() -> dict:
672
+ """
673
+ refresh-models
674
+ """
675
+ response = requests.post(url="http://127.0.0.1:8888/v1/engines/refresh-models",
676
+ timeout=30)
677
+ return response.json()
678
+ ```
679
+
680
+ **Response example**
681
+ ```python
682
+ {
683
+ "model_filenames": [
684
+ "juggernautXL_version6Rundiffusion.safetensors",
685
+ "sd_xl_base_1.0_0.9vae.safetensors",
686
+ "sd_xl_refiner_1.0_0.9vae.safetensors"
687
+ ],
688
+ "lora_filenames": [
689
+ "sd_xl_offset_example-lora_1.0.safetensors"
690
+ ]
691
+ }
692
+ ```
693
+
694
+ ## Styles | styles
695
+
696
+ **Basic info:**
697
+
698
+ ```yaml
699
+ EndPoint: /v1/engines/styles
700
+ Method: Get
701
+ ```
702
+
703
+ **Request example**:
704
+
705
+ ```python
706
+ def styles() -> dict:
707
+ """
708
+ styles
709
+ """
710
+ response = requests.get(url="http://127.0.0.1:8888/v1/engines/styles",
711
+ timeout=30)
712
+ return response.json()
713
+ ```
714
+
715
+ **Response example**:
716
+
717
+ ```python
718
+ [
719
+ "Fooocus V2",
720
+ "Fooocus Enhance",
721
+ ...
722
+ "Watercolor 2",
723
+ "Whimsical And Playful"
724
+ ]
725
+ ```
726
+
727
+ # Fooocus API task endpoints
728
+
729
+ ## Job queue | job-queue
730
+
731
+ **Basic info:**
732
+
733
+ ```yaml
734
+ EndPoint: /v1/generation/job-queue
735
+ Method: Get
736
+ ```
737
+
738
+ **Request example**:
739
+
740
+ ```python
741
+ def job_queue() -> dict:
742
+ """
743
+ job-queue
744
+ """
745
+ response = requests.get(url="http://127.0.0.1:8888/v1/generation/job-queue",
746
+ timeout=30)
747
+ return response.json()
748
+ ```
749
+
750
+ **响应示例**:
751
+
752
+ ```python
753
+ {
754
+ "running_size": 0,
755
+ "finished_size": 1,
756
+ "last_job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4"
757
+ }
758
+ ```
759
+
760
+ ## Query job | query-job
761
+
762
+ **Basic info:**
763
+
764
+ ```yaml
765
+ EndPoint: /v1/generation/query-job
766
+ Method: Get
767
+ ```
768
+
769
+ **Request example**:
770
+ ```python
771
+ def task_result(task_id: str) -> dict:
772
+ # fetch the task status
773
+ task_status = requests.get(url="http://127.0.0.1:8888/v1/generation/query-job",
774
+ params={"job_id": task_id,
775
+ "require_step_preivew": False},
776
+ timeout=30)
777
+
778
+ return task_status.json()
779
+ ```
780
+
781
+ **Response example**:
782
+ ```python
783
+ {
784
+ "job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4",
785
+ "job_type": "Text to Image",
786
+ "job_stage": "SUCCESS",
787
+ "job_progress": 100,
788
+ "job_status": "Finished",
789
+ "job_step_preview": null,
790
+ "job_result": [
791
+ {
792
+ "base64": null,
793
+ "url": "http://127.0.0.1:8888/files/2023-11-27/b928e50e-3c09-4187-a3f9-1c12280bfd95.png",
794
+ "seed": 8228839561385006000,
795
+ "finish_reason": "SUCCESS"
796
+ }
797
+ ]
798
+ }
799
+ ```
800
+
801
+ ## Job history | job-history
802
+
803
+ **Basic info:**
804
+
805
+ ```yaml
806
+ EndPoint: /v1/generation/job-history
807
+ Method: Get
808
+ ```
809
+
810
+ **Request example**:
811
+
812
+ ```python
813
+ def job_history() -> dict:
814
+ """
815
+ job-history
816
+ """
817
+ response = requests.get(url="http://127.0.0.1:8888/v1/generation/job-history",
818
+ timeout=30)
819
+ return response.json()
820
+ ```
821
+
822
+ **Response example**:
823
+
824
+ ```python
825
+ {
826
+ "queue": [],
827
+ "history": [
828
+ "job_id": "cac3914a-926d-4b6f-a46a-83794a0ce1d4",
829
+ "is_finished": True
830
+ ]
831
+ }
832
+ ```
833
+
834
+ ## Stop job | stop
835
+
836
+ **Basic info:**
837
+
838
+ ```yaml
839
+ EndPoint: /v1/generation/stop
840
+ Method: Post
841
+ ```
842
+
843
+ **Request example**:
844
+
845
+ ```python
846
+ def stop() -> dict:
847
+ """
848
+ stop
849
+ """
850
+ response = requests.post(url="http://127.0.0.1:8888/v1/generation/stop",
851
+ timeout=30)
852
+ return response.json()
853
+ ```
854
+
855
+ **Response example**:
856
+
857
+ ```python
858
+ {
859
+ "msg": "success"
860
+ }
861
+ ```
862
+
863
+ ## ping
864
+
865
+ **Basic info:**
866
+
867
+ ```yaml
868
+ EndPoint: /ping
869
+ Method: Get
870
+ ```
871
+
872
+ Returns a plain `pong` response
873
+
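+ **Request example**:
+
+ ```python
+ # simple health check against the /ping endpoint
+ import requests
+
+ def ping() -> str:
+     """
+     ping
+     """
+     response = requests.get(url="http://127.0.0.1:8888/ping",
+                             timeout=30)
+     return response.text
+ ```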
874
+ # webhook
875
+
876
+ You can specify an address with `--webhook-url` on the command line so that you are notified when an asynchronous task finishes
877
+
878
+ Here is a simple example that shows how the `webhook` works
879
+
880
+ First, start a minimal server with the following code:
881
+
882
+ ```python
883
+ from fastapi import FastAPI
884
+ import uvicorn
885
+
886
+ app = FastAPI()
887
+
888
+ @app.post("/status")
889
+ async def status(payload: dict):
890
+ print(payload)
891
+
892
+ uvicorn.run(app, host="0.0.0.0", port=8000)
893
+ ```
894
+
895
+ Then add `--webhook-url http://host:8000/status` when starting Fooocus API
896
+
897
+ Submit a job in any way you like; once it finishes, the job completion message appears in the console of this minimal server:
898
+
899
+ ```python
900
+ {'job_id': '717ec0b5-85df-4174-80d6-bddf93cd8248', 'job_result': [{'url': 'http://127.0.0.1:8888/files/2023-12-29/f1eca704-718e-4781-9d5f-82d41aa799d7.png', 'seed': '3283449865282320931'}]}
901
+ ```
902
+
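+ Building on the payload above, the handler can do more than print: below is a sketch that downloads each finished image (field names are taken from the example payload; the save path is illustrative):
+
+ ```python
+ from fastapi import FastAPI
+ import requests
+ import uvicorn
+
+ app = FastAPI()
+
+ @app.post("/status")
+ async def status(payload: dict):
+     # fetch every generated image reported by the webhook
+     for item in payload.get("job_result", []):
+         if item.get("url"):
+             data = requests.get(item["url"], timeout=30).content
+             with open(f"{payload['job_id']}_{item['seed']}.png", "wb") as f:
+                 f.write(data)
+
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+ ```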
903
+ # Common request bodies
904
+
905
+ ## Advanced parameters | AdvancedParams
906
+
907
+ | Name | Type | Description |
908
+ | ---- | ---- | ----------- |
909
+ | disable_preview | bool | Whether to disable previews during generation, default False |
910
+ | adm_scaler_positive | float | Positive ADM Guidance Scaler, default 1.5, range 0.1-3.0 |
911
+ | adm_scaler_negative | float | Negative ADM Guidance Scaler, default 0.8, range 0.1-3.0 |
912
+ | adm_scaler_end | float | ADM Guidance end-at step, default 0.3, range 0.0-1.0 |
913
+ | refiner_swap_method | str | Refiner swap method, default `joint` |
914
+ | adaptive_cfg | float | CFG Mimicking from TSNR, default 7.0, range 1.0-30.0 |
915
+ | sampler_name | str | Sampler, default `default_sampler` |
916
+ | scheduler_name | str | Scheduler, default `default_scheduler` |
917
+ | overwrite_step | int | Forced Overwrite of Sampling Step, default -1, range -1-200 |
918
+ | overwrite_switch | int | Forced Overwrite of Refiner Switch Step, default -1, range -1-200 |
919
+ | overwrite_width | int | Forced Overwrite of Generating Width, default -1, range -1-2048 |
920
+ | overwrite_height | int | Forced Overwrite of Generating Height, default -1, range -1-2048 |
921
+ | overwrite_vary_strength | float | Forced Overwrite of Denoising Strength of "Vary", default -1, range -1-1.0 |
922
+ | overwrite_upscale_strength | float | Forced Overwrite of Denoising Strength of "Upscale", default -1, range -1-1.0 |
923
+ | mixing_image_prompt_and_vary_upscale | bool | Mixing Image Prompt and Vary/Upscale, default False |
924
+ | mixing_image_prompt_and_inpaint | bool | Mixing Image Prompt and Inpaint, default False |
925
+ | debugging_cn_preprocessor | bool | Debug Preprocessors, default False |
926
+ | skipping_cn_preprocessor | bool | Skip Preprocessors, default False |
927
+ | controlnet_softness | float | Softness of ControlNet, default 0.25, range 0.0-1.0 |
928
+ | canny_low_threshold | int | Canny Low Threshold, default 64, range 1-255 |
929
+ | canny_high_threshold | int | Canny High Threshold, default 128, range 1-255 |
930
+ | freeu_enabled | bool | FreeU enabled, default False |
931
+ | freeu_b1 | float | FreeU B1, default 1.01 |
932
+ | freeu_b2 | float | FreeU B2, default 1.02 |
933
+ | freeu_s1 | float | FreeU S1, default 0.99 |
934
+ | freeu_s2 | float | FreeU S2, default 0.95 |
935
+ | debugging_inpaint_preprocessor | bool | Debug Inpaint Preprocessing, default False |
936
+ | inpaint_disable_initial_latent | bool | Disable initial latent in inpaint, default False |
937
+ | inpaint_engine | str | Inpaint Engine, default `v1` |
938
+ | inpaint_strength | float | Inpaint Denoising Strength, default 1.0, range 0.0-1.0 |
939
+ | inpaint_respective_field | float | Inpaint Respective Field, default 1.0, range 0.0-1.0 |
940
+
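+ The v2 (JSON) endpoints accept `advanced_params` as a nested object, while the v1 (form) endpoints expect the same structure serialized to a JSON string. A minimal sketch of both styles (the `uov_method` value here is illustrative):
+
+ ```python
+ import json
+ import requests
+
+ host = "http://127.0.0.1:8888"
+ image = open("./examples/imgs/bear.jpg", "rb").read()
+ advanced_params = {"sampler_name": "dpmpp_2m_sde_gpu", "scheduler_name": "karras"}
+
+ # JSON endpoints: nest the object directly in the request body
+ requests.post(url=f"{host}/v1/generation/text-to-image",
+               data=json.dumps({"prompt": "a cat", "advanced_params": advanced_params}),
+               headers={"Content-Type": "application/json"})
+
+ # form endpoints: send the same structure as a JSON string
+ requests.post(url=f"{host}/v1/generation/image-upscale-vary",
+               data={"uov_method": "Upscale (2x)", "advanced_params": json.dumps(advanced_params)},
+               files={"input_image": image})
+ ```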
941
+ ## lora
942
+
943
+ | Name | Type | Description |
944
+ | ---- | ---- | ----------- |
945
+ | model_name | str | Model name |
946
+ | weight | float | Weight, default 0.5 |
947
+
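+ Likewise, the v1 (form) endpoints expect `loras` as a JSON string, while the v2 (JSON) endpoints take the list directly. A quick sketch using the example LoRA from above:
+
+ ```python
+ import json
+
+ loras = [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}]
+
+ # v1 (form data): serialize the list to a string
+ form_params = {"prompt": "a cat", "loras": json.dumps(loras)}
+
+ # v2 (json body): pass the list as-is
+ json_params = {"prompt": "a cat", "loras": loras}
+ ```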
948
+ ## Response | response
949
+
950
+ Successful response:
951
+
952
+ **async_process: True**
953
+
954
+ | Name | Type | Description |
955
+ | ---- | ---- | ----------- |
956
+ | job_id | str | Job ID |
957
+ | job_type | str | Job type |
958
+ | job_stage | str | Job stage |
959
+ | job_progress | int | Job progress, 100 when finished |
960
+ | job_status | str | Job status |
961
+ | job_step_preview | str | Base64 preview of the current generation step |
962
+ | job_result | List[GeneratedImageResult] | Job result |
963
+
964
+ **async_process: False**
965
+
966
+ | Name | Type | Description |
967
+ | ---- | ---- | ----------- |
968
+ | base64 | str | Base64-encoded image; whether it is null depends on the `require_base64` parameter |
969
+ | url | str | Image URL |
970
+ | seed | int | Image seed |
971
+ | finish_reason | str | Reason the job finished |
972
+
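+ When `async_process` is true, the generation endpoints return only the job metadata above; the images have to be fetched afterwards. A minimal polling sketch, reusing the `task_result` helper from the query-job section:
+
+ ```python
+ import time
+
+ def wait_for_job(job_id: str, interval: float = 2.0) -> dict:
+     """Poll query-job until the job leaves the WAITING/RUNNING stages."""
+     while True:
+         status = task_result(job_id)
+         # job_stage is one of WAITING, RUNNING, SUCCESS, ERROR
+         if status["job_stage"] not in ("WAITING", "RUNNING"):
+             return status
+         time.sleep(interval)
+ ```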
973
+ Error response:
Fooocus-API/docs/openapi.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"openapi":"3.1.0","info":{"title":"FastAPI","version":"0.1.0"},"paths":{"/":{"get":{"summary":"Home","operationId":"home__get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}}}},"/ping":{"get":{"summary":"Ping","description":"Returns a simple 'pong' response","operationId":"ping_ping_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{}}}}}}},"/v1/generation/text-to-image":{"post":{"summary":"Text2Img Generation","operationId":"text2img_generation_v1_generation_text_to_image_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"application/json":{"schema":{"$ref":"#/components/schemas/Text2ImgRequest"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Text2Img Generation V1 Generation Text To Image Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v2/generation/text-to-image-with-ip":{"post":{"summary":"Text To Img With Ip","operationId":"text_to_img_with_ip_v2_generation_text_to_image_with_ip_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"application/json":{"schema":{"$ref":"#/components/schemas/Text2ImgRequestWithPrompt"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Text To Img With Ip V2 Generation Text To Image With Ip Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/generation/image-upscale-vary":{"post":{"summary":"Img Upscale Or Vary","operationId":"img_upscale_or_vary_v1_generation_image_upscale_vary_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to 
overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/Body_img_upscale_or_vary_v1_generation_image_upscale_vary_post"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Upscale Or Vary V1 Generation Image Upscale Vary Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v2/generation/image-upscale-vary":{"post":{"summary":"Img Upscale Or Vary V2","operationId":"img_upscale_or_vary_v2_v2_generation_image_upscale_vary_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ImgUpscaleOrVaryRequestJson"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Upscale Or Vary V2 V2 Generation Image Upscale Vary Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/generation/image-inpait-outpaint":{"post":{"summary":"Img Inpaint Or Outpaint","operationId":"img_inpaint_or_outpaint_v1_generation_image_inpait_outpaint_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/Body_img_inpaint_or_outpaint_v1_generation_image_inpait_outpaint_post"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise 
JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Inpaint Or Outpaint V1 Generation Image Inpait Outpaint Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v2/generation/image-inpait-outpaint":{"post":{"summary":"Img Inpaint Or Outpaint V2","operationId":"img_inpaint_or_outpaint_v2_v2_generation_image_inpait_outpaint_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ImgInpaintOrOutpaintRequestJson"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Inpaint Or Outpaint V2 V2 Generation Image Inpait Outpaint Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/generation/image-prompt":{"post":{"summary":"Img Prompt","operationId":"img_prompt_v1_generation_image_prompt_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"content":{"multipart/form-data":{"schema":{"allOf":[{"$ref":"#/components/schemas/Body_img_prompt_v1_generation_image_prompt_post"}],"title":"Body"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Prompt V1 Generation Image Prompt Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v2/generation/image-prompt":{"post":{"summary":"Img 
Prompt","operationId":"img_prompt_v2_generation_image_prompt_post","parameters":[{"name":"accept","in":"query","required":false,"schema":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes","title":"Accept"},"description":"Parameter to overvide 'Accept' header, 'image/png' for output bytes"},{"name":"accept","in":"header","required":false,"schema":{"type":"string","title":"Accept"}}],"requestBody":{"required":true,"content":{"application/json":{"schema":{"$ref":"#/components/schemas/ImgPromptRequestJson"}}}},"responses":{"200":{"description":"PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON","content":{"application/json":{"schema":{"anyOf":[{"type":"array","items":{"$ref":"#/components/schemas/GeneratedImageResult"}},{"$ref":"#/components/schemas/AsyncJobResponse"}],"title":"Response Img Prompt V2 Generation Image Prompt Post"},"example":[{"base64":"...very long string...","seed":"1050625087","finish_reason":"SUCCESS"}]},"application/json async":{"example":{"job_id":1,"job_type":"Text to Image"}},"image/png":{"example":"PNG bytes, what did you expect?"}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/generation/query-job":{"get":{"summary":"Query Job","description":"Query async generation job","operationId":"query_job_v1_generation_query_job_get","parameters":[{"name":"job_id","in":"query","required":true,"schema":{"type":"string","title":"Job Id"}},{"name":"require_step_preivew","in":"query","required":false,"schema":{"type":"boolean","default":false,"title":"Require Step Preivew"}}],"responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AsyncJobResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/generation/job-queue":{"get":{"summary":"Job Queue","description":"Query job queue info","operationId":"job_queue_v1_generation_job_queue_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobQueueInfo"}}}}}}},"/v1/generation/job-history":{"get":{"summary":"Get History","description":"Query historical job data","operationId":"get_history_v1_generation_job_history_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/JobHistoryResponse"}}}}}}},"/v1/generation/stop":{"post":{"summary":"Stop","description":"Job stoping","operationId":"stop_v1_generation_stop_post","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/StopResponse"}}}}}}},"/v1/tools/describe-image":{"post":{"summary":"Describe Image","operationId":"describe_image_v1_tools_describe_image_post","parameters":[{"name":"type","in":"query","required":false,"schema":{"allOf":[{"$ref":"#/components/schemas/DescribeImageType"}],"description":"Image type, 'Photo' or 'Anime'","default":"Photo","title":"Type"},"description":"Image type, 'Photo' or 'Anime'"}],"requestBody":{"required":true,"content":{"multipart/form-data":{"schema":{"$ref":"#/components/schemas/Body_describe_image_v1_tools_describe_image_post"}}}},"responses":{"200":{"description":"Successful 
Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/DescribeImageResponse"}}}},"422":{"description":"Validation Error","content":{"application/json":{"schema":{"$ref":"#/components/schemas/HTTPValidationError"}}}}}}},"/v1/engines/all-models":{"get":{"summary":"All Models","description":"Get all filenames of base model and lora","operationId":"all_models_v1_engines_all_models_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AllModelNamesResponse"}}}}}}},"/v1/engines/refresh-models":{"post":{"summary":"Refresh Models","description":"Refresh local files and get all filenames of base model and lora","operationId":"refresh_models_v1_engines_refresh_models_post","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"$ref":"#/components/schemas/AllModelNamesResponse"}}}}}}},"/v1/engines/styles":{"get":{"summary":"All Styles","description":"Get all legal Fooocus styles","operationId":"all_styles_v1_engines_styles_get","responses":{"200":{"description":"Successful Response","content":{"application/json":{"schema":{"items":{"type":"string"},"type":"array","title":"Response All Styles V1 Engines Styles Get"}}}}}}}},"components":{"schemas":{"AdvancedParams":{"properties":{"disable_preview":{"type":"boolean","title":"Disable Preview","description":"Disable preview during generation","default":false},"adm_scaler_positive":{"type":"number","maximum":3.0,"minimum":0.1,"title":"Adm Scaler Positive","description":"Positive ADM Guidance Scaler","default":1.5},"adm_scaler_negative":{"type":"number","maximum":3.0,"minimum":0.1,"title":"Adm Scaler Negative","description":"Negative ADM Guidance Scaler","default":0.8},"adm_scaler_end":{"type":"number","maximum":1.0,"minimum":0.0,"title":"Adm Scaler End","description":"ADM Guidance End At Step","default":0.3},"refiner_swap_method":{"type":"string","title":"Refiner Swap Method","description":"Refiner swap method","default":"joint"},"adaptive_cfg":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Adaptive Cfg","description":"CFG Mimicking from TSNR","default":7.0},"sampler_name":{"type":"string","title":"Sampler Name","description":"Sampler","default":"dpmpp_2m_sde_gpu"},"scheduler_name":{"type":"string","title":"Scheduler Name","description":"Scheduler","default":"karras"},"overwrite_step":{"type":"integer","maximum":200.0,"minimum":-1.0,"title":"Overwrite Step","description":"Forced Overwrite of Sampling Step","default":-1},"overwrite_switch":{"type":"integer","maximum":200.0,"minimum":-1.0,"title":"Overwrite Switch","description":"Forced Overwrite of Refiner Switch Step","default":-1},"overwrite_width":{"type":"integer","maximum":2048.0,"minimum":-1.0,"title":"Overwrite Width","description":"Forced Overwrite of Generating Width","default":-1},"overwrite_height":{"type":"integer","maximum":2048.0,"minimum":-1.0,"title":"Overwrite Height","description":"Forced Overwrite of Generating Height","default":-1},"overwrite_vary_strength":{"type":"number","maximum":1.0,"minimum":-1.0,"title":"Overwrite Vary Strength","description":"Forced Overwrite of Denoising Strength of \"Vary\"","default":-1},"overwrite_upscale_strength":{"type":"number","maximum":1.0,"minimum":-1.0,"title":"Overwrite Upscale Strength","description":"Forced Overwrite of Denoising Strength of \"Upscale\"","default":-1},"mixing_image_prompt_and_vary_upscale":{"type":"boolean","title":"Mixing Image Prompt And Vary Upscale","description":"Mixing Image 
Prompt and Vary/Upscale","default":false},"mixing_image_prompt_and_inpaint":{"type":"boolean","title":"Mixing Image Prompt And Inpaint","description":"Mixing Image Prompt and Inpaint","default":false},"debugging_cn_preprocessor":{"type":"boolean","title":"Debugging Cn Preprocessor","description":"Debug Preprocessors","default":false},"skipping_cn_preprocessor":{"type":"boolean","title":"Skipping Cn Preprocessor","description":"Skip Preprocessors","default":false},"controlnet_softness":{"type":"number","maximum":1.0,"minimum":0.0,"title":"Controlnet Softness","description":"Softness of ControlNet","default":0.25},"canny_low_threshold":{"type":"integer","maximum":255.0,"minimum":1.0,"title":"Canny Low Threshold","description":"Canny Low Threshold","default":64},"canny_high_threshold":{"type":"integer","maximum":255.0,"minimum":1.0,"title":"Canny High Threshold","description":"Canny High Threshold","default":128},"freeu_enabled":{"type":"boolean","title":"Freeu Enabled","description":"FreeU enabled","default":false},"freeu_b1":{"type":"number","title":"Freeu B1","description":"FreeU B1","default":1.01},"freeu_b2":{"type":"number","title":"Freeu B2","description":"FreeU B2","default":1.02},"freeu_s1":{"type":"number","title":"Freeu S1","description":"FreeU B3","default":0.99},"freeu_s2":{"type":"number","title":"Freeu S2","description":"FreeU B4","default":0.95},"debugging_inpaint_preprocessor":{"type":"boolean","title":"Debugging Inpaint Preprocessor","description":"Debug Inpaint Preprocessing","default":false},"inpaint_disable_initial_latent":{"type":"boolean","title":"Inpaint Disable Initial Latent","description":"Disable initial latent in inpaint","default":false},"inpaint_engine":{"type":"string","title":"Inpaint Engine","description":"Inpaint Engine","default":"v1"},"inpaint_strength":{"type":"number","maximum":1.0,"minimum":0.0,"title":"Inpaint Strength","description":"Inpaint Denoising Strength","default":1.0},"inpaint_respective_field":{"type":"number","maximum":1.0,"minimum":0.0,"title":"Inpaint Respective Field","description":"Inpaint Respective Field","default":1.0},"invert_mask_checkbox":{"type":"boolean","title":"Invert Mask Checkbox","description":"Invert Mask","default":false},"inpaint_erode_or_dilate":{"type":"integer","maximum":64.0,"minimum":-64.0,"title":"Inpaint Erode Or Dilate","description":"Mask Erode or Dilate","default":0}},"type":"object","title":"AdvancedParams"},"AllModelNamesResponse":{"properties":{"model_filenames":{"items":{"type":"string"},"type":"array","title":"Model Filenames","description":"All available model filenames"},"lora_filenames":{"items":{"type":"string"},"type":"array","title":"Lora Filenames","description":"All available lora filenames"}},"type":"object","required":["model_filenames","lora_filenames"],"title":"AllModelNamesResponse"},"AsyncJobResponse":{"properties":{"job_id":{"type":"string","title":"Job Id","description":"Job ID"},"job_type":{"allOf":[{"$ref":"#/components/schemas/TaskType"}],"description":"Job type"},"job_stage":{"allOf":[{"$ref":"#/components/schemas/AsyncJobStage"}],"description":"Job running stage"},"job_progress":{"type":"integer","title":"Job Progress","description":"Job running progress, 100 is for finished."},"job_status":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Job Status","description":"Job running status in text"},"job_step_preview":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Job Step Preview","description":"Preview image of generation steps at current time, as base64 
image"},"job_result":{"anyOf":[{"items":{"$ref":"#/components/schemas/GeneratedImageResult"},"type":"array"},{"type":"null"}],"title":"Job Result","description":"Job generation result"}},"type":"object","required":["job_id","job_type","job_stage","job_progress"],"title":"AsyncJobResponse"},"AsyncJobStage":{"type":"string","enum":["WAITING","RUNNING","SUCCESS","ERROR"],"title":"AsyncJobStage"},"Body_describe_image_v1_tools_describe_image_post":{"properties":{"image":{"type":"string","format":"binary","title":"Image"}},"type":"object","required":["image"],"title":"Body_describe_image_v1_tools_describe_image_post"},"Body_img_inpaint_or_outpaint_v1_generation_image_inpait_outpaint_post":{"properties":{"input_image":{"type":"string","format":"binary","title":"Input Image","description":"Init image for inpaint or outpaint"},"input_mask":{"type":"string","format":"binary","title":"Input Mask","description":"Inpaint or outpaint mask"},"inpaint_additional_prompt":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inpaint Additional Prompt","description":"Describe what you want to inpaint"},"outpaint_selections":{"items":{"type":"string"},"type":"array","title":"Outpaint Selections","description":"Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma","default":[]},"outpaint_distance_left":{"type":"integer","title":"Outpaint Distance Left","description":"Set outpaint left distance, -1 for default","default":0},"outpaint_distance_right":{"type":"integer","title":"Outpaint Distance Right","description":"Set outpaint right distance, -1 for default","default":0},"outpaint_distance_top":{"type":"integer","title":"Outpaint Distance Top","description":"Set outpaint top distance, -1 for default","default":0},"outpaint_distance_bottom":{"type":"integer","title":"Outpaint Distance Bottom","description":"Set outpaint bottom distance, -1 for default","default":0},"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","description":"Fooocus style selections, seperated by comma","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch At","default":0.5},"loras":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Loras","description":"Lora config in JSON. 
Format as [{\"model_name\": \"sd_xl_offset_example-lora_1.0.safetensors\", \"weight\": 0.5}]","default":"[{\"model_name\":\"sd_xl_offset_example-lora_1.0.safetensors\",\"weight\":0.1}]"},"advanced_params":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Advanced Params","description":"Advanced parameters in JSON"},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false}},"type":"object","required":["input_image"],"title":"Body_img_inpaint_or_outpaint_v1_generation_image_inpait_outpaint_post"},"Body_img_prompt_v1_generation_image_prompt_post":{"properties":{"cn_img1":{"type":"string","format":"binary","title":"Cn Img1","description":"Input image for image prompt"},"input_image":{"type":"string","format":"binary","title":"Input Image","description":"Init image for inpaint or outpaint"},"input_mask":{"type":"string","format":"binary","title":"Input Mask","description":"Inpaint or outpaint mask"},"inpaint_additional_prompt":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inpaint Additional Prompt","description":"Describe what you want to inpaint"},"outpaint_selections":{"items":{"type":"string"},"type":"array","title":"Outpaint Selections","description":"Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' seperated by comma","default":[]},"outpaint_distance_left":{"type":"integer","title":"Outpaint Distance Left","description":"Set outpaint left distance, 0 for default","default":0},"outpaint_distance_right":{"type":"integer","title":"Outpaint Distance Right","description":"Set outpaint right distance, 0 for default","default":0},"outpaint_distance_top":{"type":"integer","title":"Outpaint Distance Top","description":"Set outpaint top distance, 0 for default","default":0},"outpaint_distance_bottom":{"type":"integer","title":"Outpaint Distance Bottom","description":"Set outpaint bottom distance, 0 for default","default":0},"cn_stop1":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop1","description":"Stop at for image prompt, None for default value"},"cn_weight1":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight1","description":"Weight for image prompt, None for default value"},"cn_type1":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"description":"ControlNet type for image prompt","default":"ImagePrompt"},"cn_img2":{"type":"string","format":"binary","title":"Cn Img2","description":"Input image for image prompt"},"cn_stop2":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop2","description":"Stop at for image prompt, None for default value"},"cn_weight2":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight2","description":"Weight for image prompt, None for default value"},"cn_type2":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"description":"ControlNet type for image prompt","default":"ImagePrompt"},"cn_img3":{"type":"string","format":"binary","title":"Cn Img3","description":"Input image for image prompt"},"cn_stop3":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop3","description":"Stop at for image prompt, None for default 
value"},"cn_weight3":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight3","description":"Weight for image prompt, None for default value"},"cn_type3":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"description":"ControlNet type for image prompt","default":"ImagePrompt"},"cn_img4":{"type":"string","format":"binary","title":"Cn Img4","description":"Input image for image prompt"},"cn_stop4":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop4","description":"Stop at for image prompt, None for default value"},"cn_weight4":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight4","description":"Weight for image prompt, None for default value"},"cn_type4":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"description":"ControlNet type for image prompt","default":"ImagePrompt"},"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","description":"Fooocus style selections, seperated by comma","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch At","default":0.5},"loras":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Loras","description":"Lora config in JSON. 
Format as [{\"model_name\": \"sd_xl_offset_example-lora_1.0.safetensors\", \"weight\": 0.5}]","default":"[{\"model_name\":\"sd_xl_offset_example-lora_1.0.safetensors\",\"weight\":0.1}]"},"advanced_params":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Advanced Params","description":"Advanced parameters in JSON"},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false}},"type":"object","title":"Body_img_prompt_v1_generation_image_prompt_post"},"Body_img_upscale_or_vary_v1_generation_image_upscale_vary_post":{"properties":{"input_image":{"type":"string","format":"binary","title":"Input Image","description":"Init image for upsacale or outpaint"},"uov_method":{"$ref":"#/components/schemas/UpscaleOrVaryMethod"},"upscale_value":{"anyOf":[{"type":"number","maximum":5.0,"minimum":1.0},{"type":"null"}],"title":"Upscale Value","description":"Upscale custom value, None for default value"},"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","description":"Fooocus style selections, seperated by comma","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch At","default":0.5},"loras":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Loras","description":"Lora config in JSON. 
Format as [{\"model_name\": \"sd_xl_offset_example-lora_1.0.safetensors\", \"weight\": 0.5}]","default":"[{\"model_name\":\"sd_xl_offset_example-lora_1.0.safetensors\",\"weight\":0.1}]"},"advanced_params":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Advanced Params","description":"Advanced parameters in JSON"},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false}},"type":"object","required":["input_image","uov_method"],"title":"Body_img_upscale_or_vary_v1_generation_image_upscale_vary_post"},"ControlNetType":{"type":"string","enum":["ImagePrompt","FaceSwap","PyraCanny","CPDS"],"title":"ControlNetType"},"DescribeImageResponse":{"properties":{"describe":{"type":"string","title":"Describe"}},"type":"object","required":["describe"],"title":"DescribeImageResponse"},"DescribeImageType":{"type":"string","enum":["Photo","Anime"],"title":"DescribeImageType"},"GeneratedImageResult":{"properties":{"base64":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Base64","description":"Image encoded in base64, or null if finishReasen is not 'SUCCESS', only return when request require base64"},"url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Url","description":"Image file static serve url, or null if finishReasen is not 'SUCCESS'"},"seed":{"type":"string","title":"Seed","description":"The seed associated with this image"},"finish_reason":{"$ref":"#/components/schemas/GenerationFinishReason"}},"type":"object","required":["base64","url","seed","finish_reason"],"title":"GeneratedImageResult"},"GenerationFinishReason":{"type":"string","enum":["SUCCESS","QUEUE_IS_FULL","USER_CANCEL","ERROR"],"title":"GenerationFinishReason"},"HTTPValidationError":{"properties":{"detail":{"items":{"$ref":"#/components/schemas/ValidationError"},"type":"array","title":"Detail"}},"type":"object","title":"HTTPValidationError"},"ImagePrompt":{"properties":{"cn_img":{"anyOf":[{"type":"string","format":"binary"},{"type":"null"}],"title":"Cn Img"},"cn_stop":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop"},"cn_weight":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight","description":"None for default value"},"cn_type":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"default":"ImagePrompt"}},"type":"object","title":"ImagePrompt"},"ImagePromptJson":{"properties":{"cn_img":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Cn Img","description":"Input image for image prompt as base64"},"cn_stop":{"anyOf":[{"type":"number","maximum":1.0,"minimum":0.0},{"type":"null"}],"title":"Cn Stop","description":"Stop at for image prompt, 0 for default value","default":0},"cn_weight":{"anyOf":[{"type":"number","maximum":2.0,"minimum":0.0},{"type":"null"}],"title":"Cn Weight","description":"Weight for image prompt, 0 for default value","default":0},"cn_type":{"allOf":[{"$ref":"#/components/schemas/ControlNetType"}],"description":"ControlNet type for image prompt","default":"ImagePrompt"}},"type":"object","title":"ImagePromptJson"},"ImgInpaintOrOutpaintRequestJson":{"properties":{"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style 
Selections","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch At","default":0.5},"loras":{"items":{"$ref":"#/components/schemas/Lora"},"type":"array","title":"Loras","default":[{"model_name":"sd_xl_offset_example-lora_1.0.safetensors","weight":0.1}]},"advanced_params":{"anyOf":[{"$ref":"#/components/schemas/AdvancedParams"},{"type":"null"}],"default":{"adaptive_cfg":7.0,"adm_scaler_end":0.3,"adm_scaler_negative":0.8,"adm_scaler_positive":1.5,"canny_high_threshold":128,"canny_low_threshold":64,"controlnet_softness":0.25,"debugging_cn_preprocessor":false,"debugging_inpaint_preprocessor":false,"disable_preview":false,"freeu_b1":1.01,"freeu_b2":1.02,"freeu_enabled":false,"freeu_s1":0.99,"freeu_s2":0.95,"inpaint_disable_initial_latent":false,"inpaint_engine":"v1","inpaint_erode_or_dilate":0,"inpaint_respective_field":1.0,"inpaint_strength":1.0,"invert_mask_checkbox":false,"mixing_image_prompt_and_inpaint":false,"mixing_image_prompt_and_vary_upscale":false,"overwrite_height":-1,"overwrite_step":-1,"overwrite_switch":-1,"overwrite_upscale_strength":-1.0,"overwrite_vary_strength":-1.0,"overwrite_width":-1,"refiner_swap_method":"joint","sampler_name":"dpmpp_2m_sde_gpu","scheduler_name":"karras","skipping_cn_preprocessor":false}},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false},"webhook_url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Webhook Url","description":"Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure. 
This allows for asynchronous notification of task status."},"input_image":{"type":"string","title":"Input Image","description":"Init image for inpaint or outpaint as base64"},"input_mask":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Input Mask","description":"Inpaint or outpaint mask as base64","default":""},"inpaint_additional_prompt":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inpaint Additional Prompt","description":"Describe what you want to inpaint","default":""},"outpaint_selections":{"items":{"$ref":"#/components/schemas/OutpaintExpansion"},"type":"array","title":"Outpaint Selections","default":[]},"outpaint_distance_left":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Left","description":"Set outpaint left distance","default":-1},"outpaint_distance_right":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Right","description":"Set outpaint right distance","default":-1},"outpaint_distance_top":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Top","description":"Set outpaint top distance","default":-1},"outpaint_distance_bottom":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Bottom","description":"Set outpaint bottom distance","default":-1}},"type":"object","required":["input_image"],"title":"ImgInpaintOrOutpaintRequestJson"},"ImgPromptRequestJson":{"properties":{"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch 
At","default":0.5},"loras":{"items":{"$ref":"#/components/schemas/Lora"},"type":"array","title":"Loras","default":[{"model_name":"sd_xl_offset_example-lora_1.0.safetensors","weight":0.1}]},"advanced_params":{"anyOf":[{"$ref":"#/components/schemas/AdvancedParams"},{"type":"null"}],"default":{"adaptive_cfg":7.0,"adm_scaler_end":0.3,"adm_scaler_negative":0.8,"adm_scaler_positive":1.5,"canny_high_threshold":128,"canny_low_threshold":64,"controlnet_softness":0.25,"debugging_cn_preprocessor":false,"debugging_inpaint_preprocessor":false,"disable_preview":false,"freeu_b1":1.01,"freeu_b2":1.02,"freeu_enabled":false,"freeu_s1":0.99,"freeu_s2":0.95,"inpaint_disable_initial_latent":false,"inpaint_engine":"v1","inpaint_erode_or_dilate":0,"inpaint_respective_field":1.0,"inpaint_strength":1.0,"invert_mask_checkbox":false,"mixing_image_prompt_and_inpaint":false,"mixing_image_prompt_and_vary_upscale":false,"overwrite_height":-1,"overwrite_step":-1,"overwrite_switch":-1,"overwrite_upscale_strength":-1.0,"overwrite_vary_strength":-1.0,"overwrite_width":-1,"refiner_swap_method":"joint","sampler_name":"dpmpp_2m_sde_gpu","scheduler_name":"karras","skipping_cn_preprocessor":false}},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false},"webhook_url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Webhook Url","description":"Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure. This allows for asynchronous notification of task status."},"input_image":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Input Image","description":"Init image for inpaint or outpaint as base64"},"input_mask":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Input Mask","description":"Inpaint or outpaint mask as base64","default":""},"inpaint_additional_prompt":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Inpaint Additional Prompt","description":"Describe what you want to inpaint","default":""},"outpaint_selections":{"items":{"$ref":"#/components/schemas/OutpaintExpansion"},"type":"array","title":"Outpaint Selections","default":[]},"outpaint_distance_left":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Left","description":"Set outpaint left distance","default":-1},"outpaint_distance_right":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Right","description":"Set outpaint right distance","default":-1},"outpaint_distance_top":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Top","description":"Set outpaint top distance","default":-1},"outpaint_distance_bottom":{"anyOf":[{"type":"integer"},{"type":"null"}],"title":"Outpaint Distance Bottom","description":"Set outpaint bottom distance","default":-1},"image_prompts":{"items":{"anyOf":[{"$ref":"#/components/schemas/ImagePromptJson"},{"$ref":"#/components/schemas/ImagePrompt"}]},"type":"array","title":"Image Prompts"}},"type":"object","required":["image_prompts"],"title":"ImgPromptRequestJson"},"ImgUpscaleOrVaryRequestJson":{"properties":{"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","default":["Fooocus 
V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch At","default":0.5},"loras":{"items":{"$ref":"#/components/schemas/Lora"},"type":"array","title":"Loras","default":[{"model_name":"sd_xl_offset_example-lora_1.0.safetensors","weight":0.1}]},"advanced_params":{"anyOf":[{"$ref":"#/components/schemas/AdvancedParams"},{"type":"null"}],"default":{"adaptive_cfg":7.0,"adm_scaler_end":0.3,"adm_scaler_negative":0.8,"adm_scaler_positive":1.5,"canny_high_threshold":128,"canny_low_threshold":64,"controlnet_softness":0.25,"debugging_cn_preprocessor":false,"debugging_inpaint_preprocessor":false,"disable_preview":false,"freeu_b1":1.01,"freeu_b2":1.02,"freeu_enabled":false,"freeu_s1":0.99,"freeu_s2":0.95,"inpaint_disable_initial_latent":false,"inpaint_engine":"v1","inpaint_erode_or_dilate":0,"inpaint_respective_field":1.0,"inpaint_strength":1.0,"invert_mask_checkbox":false,"mixing_image_prompt_and_inpaint":false,"mixing_image_prompt_and_vary_upscale":false,"overwrite_height":-1,"overwrite_step":-1,"overwrite_switch":-1,"overwrite_upscale_strength":-1.0,"overwrite_vary_strength":-1.0,"overwrite_width":-1,"refiner_swap_method":"joint","sampler_name":"dpmpp_2m_sde_gpu","scheduler_name":"karras","skipping_cn_preprocessor":false}},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false},"webhook_url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Webhook Url","description":"Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure. 
This allows for asynchronous notification of task status."},"uov_method":{"allOf":[{"$ref":"#/components/schemas/UpscaleOrVaryMethod"}],"default":"Upscale (2x)"},"upscale_value":{"anyOf":[{"type":"number","maximum":5.0,"minimum":1.0},{"type":"null"}],"title":"Upscale Value","description":"Upscale custom value, 1.0 for default value","default":1.0},"input_image":{"type":"string","title":"Input Image","description":"Init image for upsacale or outpaint as base64"}},"type":"object","required":["input_image"],"title":"ImgUpscaleOrVaryRequestJson"},"JobHistoryInfo":{"properties":{"job_id":{"type":"string","title":"Job Id"},"is_finished":{"type":"boolean","title":"Is Finished","default":false}},"type":"object","required":["job_id"],"title":"JobHistoryInfo"},"JobHistoryResponse":{"properties":{"queue":{"items":{"$ref":"#/components/schemas/JobHistoryInfo"},"type":"array","title":"Queue","default":[]},"history":{"items":{"$ref":"#/components/schemas/JobHistoryInfo"},"type":"array","title":"History","default":[]}},"type":"object","title":"JobHistoryResponse"},"JobQueueInfo":{"properties":{"running_size":{"type":"integer","title":"Running Size","description":"The current running and waiting job count"},"finished_size":{"type":"integer","title":"Finished Size","description":"Finished job cound (after auto clean)"},"last_job_id":{"type":"string","title":"Last Job Id","description":"Last submit generation job id"}},"type":"object","required":["running_size","finished_size","last_job_id"],"title":"JobQueueInfo"},"Lora":{"properties":{"model_name":{"type":"string","title":"Model Name"},"weight":{"type":"number","maximum":2.0,"minimum":-2.0,"title":"Weight","default":0.5}},"type":"object","required":["model_name"],"title":"Lora"},"OutpaintExpansion":{"type":"string","enum":["Left","Right","Top","Bottom"],"title":"OutpaintExpansion"},"PerfomanceSelection":{"type":"string","enum":["Speed","Quality","Extreme Speed"],"title":"PerfomanceSelection"},"StopResponse":{"properties":{"msg":{"type":"string","title":"Msg"}},"type":"object","required":["msg"],"title":"StopResponse"},"TaskType":{"type":"string","enum":["Text to Image","Image Upscale or Variation","Image Inpaint or Outpaint","Image Prompt","Not Found"],"title":"TaskType"},"Text2ImgRequest":{"properties":{"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch 
At","default":0.5},"loras":{"items":{"$ref":"#/components/schemas/Lora"},"type":"array","title":"Loras","default":[{"model_name":"sd_xl_offset_example-lora_1.0.safetensors","weight":0.1}]},"advanced_params":{"anyOf":[{"$ref":"#/components/schemas/AdvancedParams"},{"type":"null"}],"default":{"adaptive_cfg":7.0,"adm_scaler_end":0.3,"adm_scaler_negative":0.8,"adm_scaler_positive":1.5,"canny_high_threshold":128,"canny_low_threshold":64,"controlnet_softness":0.25,"debugging_cn_preprocessor":false,"debugging_inpaint_preprocessor":false,"disable_preview":false,"freeu_b1":1.01,"freeu_b2":1.02,"freeu_enabled":false,"freeu_s1":0.99,"freeu_s2":0.95,"inpaint_disable_initial_latent":false,"inpaint_engine":"v1","inpaint_erode_or_dilate":0,"inpaint_respective_field":1.0,"inpaint_strength":1.0,"invert_mask_checkbox":false,"mixing_image_prompt_and_inpaint":false,"mixing_image_prompt_and_vary_upscale":false,"overwrite_height":-1,"overwrite_step":-1,"overwrite_switch":-1,"overwrite_upscale_strength":-1.0,"overwrite_vary_strength":-1.0,"overwrite_width":-1,"refiner_swap_method":"joint","sampler_name":"dpmpp_2m_sde_gpu","scheduler_name":"karras","skipping_cn_preprocessor":false}},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false},"webhook_url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Webhook Url","description":"Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure. This allows for asynchronous notification of task status."}},"type":"object","title":"Text2ImgRequest"},"Text2ImgRequestWithPrompt":{"properties":{"prompt":{"type":"string","title":"Prompt","default":""},"negative_prompt":{"type":"string","title":"Negative Prompt","default":""},"style_selections":{"items":{"type":"string"},"type":"array","title":"Style Selections","default":["Fooocus V2","Fooocus Enhance","Fooocus Sharp"]},"performance_selection":{"allOf":[{"$ref":"#/components/schemas/PerfomanceSelection"}],"default":"Speed"},"aspect_ratios_selection":{"type":"string","title":"Aspect Ratios Selection","default":"1152*896"},"image_number":{"type":"integer","maximum":32.0,"minimum":1.0,"title":"Image Number","description":"Image number","default":1},"image_seed":{"type":"integer","title":"Image Seed","description":"Seed to generate image, -1 for random","default":-1},"sharpness":{"type":"number","maximum":30.0,"minimum":0.0,"title":"Sharpness","default":2.0},"guidance_scale":{"type":"number","maximum":30.0,"minimum":1.0,"title":"Guidance Scale","default":4.0},"base_model_name":{"type":"string","title":"Base Model Name","default":"juggernautXL_version6Rundiffusion.safetensors"},"refiner_model_name":{"type":"string","title":"Refiner Model Name","default":"None"},"refiner_switch":{"type":"number","maximum":1.0,"minimum":0.1,"title":"Refiner Switch","description":"Refiner Switch 
At","default":0.5},"loras":{"items":{"$ref":"#/components/schemas/Lora"},"type":"array","title":"Loras","default":[{"model_name":"sd_xl_offset_example-lora_1.0.safetensors","weight":0.1}]},"advanced_params":{"anyOf":[{"$ref":"#/components/schemas/AdvancedParams"},{"type":"null"}],"default":{"adaptive_cfg":7.0,"adm_scaler_end":0.3,"adm_scaler_negative":0.8,"adm_scaler_positive":1.5,"canny_high_threshold":128,"canny_low_threshold":64,"controlnet_softness":0.25,"debugging_cn_preprocessor":false,"debugging_inpaint_preprocessor":false,"disable_preview":false,"freeu_b1":1.01,"freeu_b2":1.02,"freeu_enabled":false,"freeu_s1":0.99,"freeu_s2":0.95,"inpaint_disable_initial_latent":false,"inpaint_engine":"v1","inpaint_erode_or_dilate":0,"inpaint_respective_field":1.0,"inpaint_strength":1.0,"invert_mask_checkbox":false,"mixing_image_prompt_and_inpaint":false,"mixing_image_prompt_and_vary_upscale":false,"overwrite_height":-1,"overwrite_step":-1,"overwrite_switch":-1,"overwrite_upscale_strength":-1.0,"overwrite_vary_strength":-1.0,"overwrite_width":-1,"refiner_swap_method":"joint","sampler_name":"dpmpp_2m_sde_gpu","scheduler_name":"karras","skipping_cn_preprocessor":false}},"require_base64":{"type":"boolean","title":"Require Base64","description":"Return base64 data of generated image","default":false},"async_process":{"type":"boolean","title":"Async Process","description":"Set to true will run async and return job info for retrieve generataion result later","default":false},"webhook_url":{"anyOf":[{"type":"string"},{"type":"null"}],"title":"Webhook Url","description":"Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure. This allows for asynchronous notification of task status."},"image_prompts":{"items":{"$ref":"#/components/schemas/ImagePromptJson"},"type":"array","title":"Image Prompts","default":[]}},"type":"object","title":"Text2ImgRequestWithPrompt"},"UpscaleOrVaryMethod":{"type":"string","enum":["Vary (Subtle)","Vary (Strong)","Upscale (1.5x)","Upscale (2x)","Upscale (Fast 2x)","Upscale (Custom)"],"title":"UpscaleOrVaryMethod"},"ValidationError":{"properties":{"loc":{"items":{"anyOf":[{"type":"string"},{"type":"integer"}]},"type":"array","title":"Location"},"msg":{"type":"string","title":"Message"},"type":{"type":"string","title":"Error Type"}},"type":"object","required":["loc","msg","type"],"title":"ValidationError"}}}}
Fooocus-API/environment.yaml ADDED
@@ -0,0 +1,7 @@
1
+ name: fooocus-api
2
+ channels:
3
+ - defaults
4
+ dependencies:
5
+ - python=3.10
6
+ - pip=23.0
7
+ - packaging
Fooocus-API/examples/examples.ipynb ADDED
@@ -0,0 +1,465 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# text to image"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": null,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import requests\n",
17
+ "import json\n",
18
+ "\n",
19
+ "# Vincent diagram example\n",
20
+ "host = \"http://127.0.0.1:8888\"\n",
21
+ "\n",
22
+ "def text2img(params: dict) -> dict:\n",
23
+ " \"\"\"\n",
24
+ " Vincentian picture\n",
25
+ " \"\"\"\n",
26
+ " result = requests.post(url=f\"{host}/v1/generation/text-to-image\",\n",
27
+ " data=json.dumps(params),\n",
28
+ " headers={\"Content-Type\": \"application/json\"})\n",
29
+ " return result.json()\n",
30
+ "\n",
31
+ "result =text2img(\n",
32
+ " {\"prompt\": \"1girl sitting on the ground\",\n",
33
+ " \"async_process\": True}\n",
34
+ " )\n",
35
+ "print(result)"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "markdown",
40
+ "metadata": {},
41
+ "source": [
42
+ "# upscale or vary"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "execution_count": null,
48
+ "metadata": {},
49
+ "outputs": [],
50
+ "source": [
51
+ "import requests\n",
52
+ "import json\n",
53
+ "\n",
54
+ "\n",
55
+ "# upscale or vary v1 Interface example\n",
56
+ "host = \"http://127.0.0.1:8888\"\n",
57
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
58
+ "\n",
59
+ "def upscale_vary(image, params: dict) -> dict:\n",
60
+ " \"\"\"\n",
61
+ " Upscale or Vary\n",
62
+ " \"\"\"\n",
63
+ " response = requests.post(url=f\"{host}/v1/generation/image-upscale-vary\",\n",
64
+ " data=params,\n",
65
+ " files={\"input_image\": image})\n",
66
+ " return response.json()\n",
67
+ "\n",
68
+ "result =upscale_vary(image=image,\n",
69
+ " params={\n",
70
+ " \"uov_method\": \"Upscale (2x)\",\n",
71
+ " \"async_process\": True\n",
72
+ " })\n",
73
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "code",
78
+ "execution_count": null,
79
+ "metadata": {},
80
+ "outputs": [],
81
+ "source": [
82
+ "import requests\n",
83
+ "import json\n",
84
+ "import base64\n",
85
+ "\n",
86
+ "\n",
87
+ "# upscale or vary v2 Interface example\n",
88
+ "host = \"http://127.0.0.1:8888\"\n",
89
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
90
+ "\n",
91
+ "def upscale_vary(params: dict) -> dict:\n",
92
+ " \"\"\"\n",
93
+ " Upscale or Vary\n",
94
+ " \"\"\"\n",
95
+ " response = requests.post(url=f\"{host}/v2/generation/image-upscale-vary\",\n",
96
+ " data=json.dumps(params),\n",
97
+ " headers={\"Content-Type\": \"application/json\"},\n",
98
+ " timeout=300)\n",
99
+ " return response.json()\n",
100
+ "\n",
101
+ "result =upscale_vary(params={\n",
102
+ " \"input_image\": base64.b64encode(image).decode('utf-8'),\n",
103
+ " \"uov_method\": \"Upscale (2x)\",\n",
104
+ " \"async_process\": True\n",
105
+ " })\n",
106
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "markdown",
111
+ "metadata": {},
112
+ "source": [
113
+ "# inpaint or outpaint"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": null,
119
+ "metadata": {},
120
+ "outputs": [],
121
+ "source": [
122
+ "import requests\n",
123
+ "import json\n",
124
+ "\n",
125
+ "# Partial redraw v1 interface example\n",
126
+ "host = \"http://127.0.0.1:8888\"\n",
127
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
128
+ "\n",
129
+ "def inpaint_outpaint(params: dict, input_image: bytes, input_mask: bytes = None) -> dict:\n",
130
+ " \"\"\"\n",
131
+ " Partial redraw v1 interface example\n",
132
+ " \"\"\"\n",
133
+ " response = requests.post(url=f\"{host}/v1/generation/image-inpait-outpaint\",\n",
134
+ " data=params,\n",
135
+ " files={\"input_image\": input_image,\n",
136
+ " \"input_mask\": input_mask})\n",
137
+ " return response.json()\n",
138
+ "\n",
139
+ "\n",
140
+ "# Image extension example\n",
141
+ "result = inpaint_outpaint(params={\n",
142
+ " \"outpaint_selections\": \"Left,Right\",\n",
143
+ " \"async_process\": True},\n",
144
+ " input_image=image,\n",
145
+ " input_mask=None)\n",
146
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
147
+ ]
148
+ },
149
+ {
150
+ "cell_type": "code",
151
+ "execution_count": null,
152
+ "metadata": {},
153
+ "outputs": [],
154
+ "source": [
155
+ "#Partial redraw example\n",
156
+ "source = open(\"./imgs/s.jpg\", \"rb\").read()\n",
157
+ "mask = open(\"./imgs/m.png\", \"rb\").read()\n",
158
+ "result = inpaint_outpaint(params={\n",
159
+ " \"prompt\": \"a cat\",\n",
160
+ " \"async_process\": True},\n",
161
+ " input_image=source,\n",
162
+ " input_mask=mask)\n",
163
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": null,
169
+ "metadata": {},
170
+ "outputs": [],
171
+ "source": [
172
+ "import requests\n",
173
+ "import json\n",
174
+ "import base64\n",
175
+ "\n",
176
+ "\n",
177
+ "# Partial redraw v2 interface example\n",
178
+ "host = \"http://127.0.0.1:8888\"\n",
179
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
180
+ "\n",
181
+ "def inpaint_outpaint(params: dict) -> dict:\n",
182
+ " \"\"\"\n",
183
+ " Partial redraw v2 interface example\n",
184
+ " \"\"\"\n",
185
+ " response = requests.post(url=f\"{host}/v2/generation/image-inpait-outpaint\",\n",
186
+ " data=json.dumps(params),\n",
187
+ " headers={\"Content-Type\": \"application/json\"})\n",
188
+ " return response.json()\n",
189
+ "\n",
190
+ "# Image extension example\n",
191
+ "result = inpaint_outpaint(params={\n",
192
+ " \"input_image\": base64.b64encode(image).decode('utf-8'),\n",
193
+ " \"input_mask\": None,\n",
194
+ " \"outpaint_selections\": [\"Left\", \"Right\"],\n",
195
+ " \"async_process\": True})\n",
196
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
197
+ ]
198
+ },
199
+ {
200
+ "cell_type": "code",
201
+ "execution_count": null,
202
+ "metadata": {},
203
+ "outputs": [],
204
+ "source": [
205
+ "# Partial redraw example\n",
206
+ "source = open(\"./imgs/s.jpg\", \"rb\").read()\n",
207
+ "mask = open(\"./imgs/m.png\", \"rb\").read()\n",
208
+ "result = inpaint_outpaint(params={\n",
209
+ " \"prompt\": \"a cat\",\n",
210
+ " \"input_image\": base64.b64encode(source).decode('utf-8'),\n",
211
+ " \"input_mask\": base64.b64encode(mask).decode('utf-8'),\n",
212
+ " \"async_process\": True})\n",
213
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "markdown",
218
+ "metadata": {},
219
+ "source": [
220
+ "# image prompts"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": null,
226
+ "metadata": {},
227
+ "outputs": [],
228
+ "source": [
229
+ "import requests\n",
230
+ "import json\n",
231
+ "\n",
232
+ "\n",
233
+ "# image_prompt v1 Interface example\n",
234
+ "host = \"http://127.0.0.1:8888\"\n",
235
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
236
+ "source = open(\"./imgs/s.jpg\", \"rb\").read()\n",
237
+ "mask = open(\"./imgs/m.png\", \"rb\").read()\n",
238
+ "\n",
239
+ "def image_prompt(params: dict,\n",
240
+ " input_iamge: bytes=None,\n",
241
+ " input_mask: bytes=None,\n",
242
+ " cn_img1: bytes=None,\n",
243
+ " cn_img2: bytes=None,\n",
244
+ " cn_img3: bytes=None,\n",
245
+ " cn_img4: bytes=None,) -> dict:\n",
246
+ " \"\"\"\n",
247
+ " image prompt\n",
248
+ " \"\"\"\n",
249
+ " response = requests.post(url=f\"{host}/v1/generation/image-prompt\",\n",
250
+ " data=params,\n",
251
+ " files={\n",
252
+ " \"input_image\": input_iamge,\n",
253
+ " \"input_mask\": input_mask,\n",
254
+ " \"cn_img1\": cn_img1,\n",
255
+ " \"cn_img2\": cn_img2,\n",
256
+ " \"cn_img3\": cn_img3,\n",
257
+ " \"cn_img4\": cn_img4,\n",
258
+ " })\n",
259
+ " return response.json()\n",
260
+ "\n",
261
+ "# image extension\n",
262
+ "params = {\n",
263
+ " \"outpaint_selections\": [\"Left\", \"Right\"],\n",
264
+ " \"image_prompts\": [] # Required parameters, can be an empty list\n",
265
+ "}\n",
266
+ "result = image_prompt(params=params, input_iamge=image)\n",
267
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
268
+ ]
269
+ },
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": null,
273
+ "metadata": {},
274
+ "outputs": [],
275
+ "source": [
276
+ "# partial redraw\n",
277
+ "\n",
278
+ "params = {\n",
279
+ " \"prompt\": \"1girl sitting on the chair\",\n",
280
+ " \"image_prompts\": [], # Required parameters, can be an empty list\n",
281
+ " \"async_process\": True\n",
282
+ "}\n",
283
+ "result = image_prompt(params=params, input_iamge=source, input_mask=mask)\n",
284
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
285
+ ]
286
+ },
287
+ {
288
+ "cell_type": "code",
289
+ "execution_count": null,
290
+ "metadata": {},
291
+ "outputs": [],
292
+ "source": [
293
+ "# image prompt\n",
294
+ "\n",
295
+ "params = {\n",
296
+ " \"prompt\": \"1girl sitting on the chair\",\n",
297
+ " \"image_prompts\": [\n",
298
+ " {\n",
299
+ " \"cn_stop\": 0.6,\n",
300
+ " \"cn_weight\": 0.6,\n",
301
+ " \"cn_type\": \"ImagePrompt\"\n",
302
+ " },{\n",
303
+ " \"cn_stop\": 0.6,\n",
304
+ " \"cn_weight\": 0.6,\n",
305
+ " \"cn_type\": \"ImagePrompt\"\n",
306
+ " }]\n",
307
+ " }\n",
308
+ "result = image_prompt(params=params, cn_img1=image, cn_img2=source)\n",
309
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
310
+ ]
311
+ },
312
+ {
313
+ "cell_type": "code",
314
+ "execution_count": null,
315
+ "metadata": {},
316
+ "outputs": [],
317
+ "source": [
318
+ "import requests\n",
319
+ "import json\n",
320
+ "import base64\n",
321
+ "\n",
322
+ "# image_prompt v2 Interface example\n",
323
+ "host = \"http://127.0.0.1:8888\"\n",
324
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
325
+ "source = open(\"./imgs/s.jpg\", \"rb\").read()\n",
326
+ "mask = open(\"./imgs/m.png\", \"rb\").read()\n",
327
+ "\n",
328
+ "def image_prompt(params: dict) -> dict:\n",
329
+ " \"\"\"\n",
330
+ " image prompt\n",
331
+ " \"\"\"\n",
332
+ " response = requests.post(url=f\"{host}/v2/generation/image-prompt\",\n",
333
+ " data=json.dumps(params),\n",
334
+ " headers={\"Content-Type\": \"application/json\"})\n",
335
+ " return response.json()\n",
336
+ "\n",
337
+ "# image extension\n",
338
+ "params = {\n",
339
+ " \"input_image\": base64.b64encode(image).decode('utf-8'),\n",
340
+ " \"outpaint_selections\": [\"Left\", \"Right\"],\n",
341
+ " \"image_prompts\": [] # Required parameters, can be an empty list\n",
342
+ "}\n",
343
+ "result = image_prompt(params)\n",
344
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
345
+ ]
346
+ },
347
+ {
348
+ "cell_type": "code",
349
+ "execution_count": null,
350
+ "metadata": {},
351
+ "outputs": [],
352
+ "source": [
353
+ "# partial redraw\n",
354
+ "\n",
355
+ "params = {\n",
356
+ " \"prompt\": \"1girl sitting on the chair\",\n",
357
+ " \"input_image\": base64.b64encode(source).decode('utf-8'),\n",
358
+ " \"input_mask\": base64.b64encode(mask).decode('utf-8'),\n",
359
+ " \"image_prompts\": [], # Required parameters, can be an empty list\n",
360
+ " \"async_process\": True\n",
361
+ "}\n",
362
+ "result = image_prompt(params)\n",
363
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
364
+ ]
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "execution_count": null,
369
+ "metadata": {},
370
+ "outputs": [],
371
+ "source": [
372
+ "# image prompt\n",
373
+ "\n",
374
+ "params = {\n",
375
+ " \"prompt\": \"1girl sitting on the chair\",\n",
376
+ " \"image_prompts\": [\n",
377
+ " {\n",
378
+ " \"cn_img\": base64.b64encode(source).decode('utf-8'),\n",
379
+ " \"cn_stop\": 0.6,\n",
380
+ " \"cn_weight\": 0.6,\n",
381
+ " \"cn_type\": \"ImagePrompt\"\n",
382
+ " },{\n",
383
+ " \"cn_img\": base64.b64encode(image).decode('utf-8'),\n",
384
+ " \"cn_stop\": 0.6,\n",
385
+ " \"cn_weight\": 0.6,\n",
386
+ " \"cn_type\": \"ImagePrompt\"\n",
387
+ " }]\n",
388
+ " }\n",
389
+ "result = image_prompt(params)\n",
390
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
391
+ ]
392
+ },
393
+ {
394
+ "cell_type": "markdown",
395
+ "metadata": {},
396
+ "source": [
397
+ "# text to image with imageprompt"
398
+ ]
399
+ },
400
+ {
401
+ "cell_type": "code",
402
+ "execution_count": null,
403
+ "metadata": {},
404
+ "outputs": [],
405
+ "source": [
406
+ "import requests\n",
407
+ "import json\n",
408
+ "import base64\n",
409
+ "\n",
410
+ "# text to image with imageprompt Example\n",
411
+ "host = \"http://127.0.0.1:8888\"\n",
412
+ "image = open(\"./imgs/bear.jpg\", \"rb\").read()\n",
413
+ "source = open(\"./imgs/s.jpg\", \"rb\").read()\n",
414
+ "def image_prompt(params: dict) -> dict:\n",
415
+ " \"\"\"\n",
416
+ " image prompt\n",
417
+ " \"\"\"\n",
418
+ " response = requests.post(url=f\"{host}/v2/generation/text-to-image-with-ip\",\n",
419
+ " data=json.dumps(params),\n",
420
+ " headers={\"Content-Type\": \"application/json\"})\n",
421
+ " return response.json()\n",
422
+ "\n",
423
+ "params = {\n",
424
+ " \"prompt\": \"A bear\",\n",
425
+ " \"image_prompts\": [\n",
426
+ " {\n",
427
+ " \"cn_img\": base64.b64encode(source).decode('utf-8'),\n",
428
+ " \"cn_stop\": 0.6,\n",
429
+ " \"cn_weight\": 0.6,\n",
430
+ " \"cn_type\": \"ImagePrompt\"\n",
431
+ " },{\n",
432
+ " \"cn_img\": base64.b64encode(image).decode('utf-8'),\n",
433
+ " \"cn_stop\": 0.6,\n",
434
+ " \"cn_weight\": 0.6,\n",
435
+ " \"cn_type\": \"ImagePrompt\"\n",
436
+ " }\n",
437
+ " ]\n",
438
+ "}\n",
439
+ "result = image_prompt(params)\n",
440
+ "print(json.dumps(result, indent=4, ensure_ascii=False))"
441
+ ]
442
+ }
443
+ ],
444
+ "metadata": {
445
+ "kernelspec": {
446
+ "display_name": "Python 3",
447
+ "language": "python",
448
+ "name": "python3"
449
+ },
450
+ "language_info": {
451
+ "codemirror_mode": {
452
+ "name": "ipython",
453
+ "version": 3
454
+ },
455
+ "file_extension": ".py",
456
+ "mimetype": "text/x-python",
457
+ "name": "python",
458
+ "nbconvert_exporter": "python",
459
+ "pygments_lexer": "ipython3",
460
+ "version": "3.10.10"
461
+ }
462
+ },
463
+ "nbformat": 4,
464
+ "nbformat_minor": 2
465
+ }
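The notebook cells above submit jobs with async_process=True but never retrieve the finished images. A minimal polling sketch for that missing step, assuming the query-job endpoint accepts the id as a job_id query parameter and reports progress through a job_stage field (both visible in the api.py handlers further below); the exact stage strings are an assumption, so inspect a raw response on your deployment before relying on them:

import time
import requests

host = "http://127.0.0.1:8888"

def wait_for_job(job_id: str, interval: float = 2.0, timeout: float = 600.0) -> dict:
    """Poll /v1/generation/query-job until the job reports a terminal stage."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = requests.get(f"{host}/v1/generation/query-job",
                            params={"job_id": job_id}, timeout=30)
        job = resp.json()
        # Assumed stage values; WAITING/RUNNING are placeholders for "not done yet".
        if job.get("job_stage") not in (None, "WAITING", "RUNNING"):
            return job
        time.sleep(interval)
    raise TimeoutError(f"job {job_id} did not finish within {timeout}s")

# Usage: submitted = text2img({"prompt": "...", "async_process": True})
#        result = wait_for_job(submitted["job_id"])
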
Fooocus-API/examples/examples.py ADDED
@@ -0,0 +1,135 @@
1
+ import json
2
+ import os
3
+ import requests
4
+ import base64
5
+
6
+ inpaint_engine = 'v1'
7
+
8
+
9
+ class Config:
10
+ fooocus_host = 'http://127.0.0.1:8888'
11
+
12
+ text2img = '/v1/generation/text-to-image'
13
+ img_upscale = '/v2/generation/image-upscale-vary'
14
+ img_upscale1 = '/v1/generation/image-upscale-vary'
15
+ inpaint_outpaint = '/v2/generation/image-inpait-outpaint'
16
+ inpaint_outpaint1 = '/v1/generation/image-inpait-outpaint'
17
+ img_prompt = '/v2/generation/image-prompt'
18
+ img_prompt1 = '/v1/generation/image-prompt'
19
+
20
+ job_queue = '/v1/generation/job-queue'
21
+ query_job = '/v1/generation/query-job'
22
+
23
+ res_path = '/v1/generation/temp'
24
+
25
+
26
+ cfg = Config()
27
+
28
+ upscale_params = {
29
+ "uov_method": "Upscale (Custom)",
30
+ "upscale_value": 3,
31
+ "input_image": ""
32
+ }
33
+
34
+ inpaint_params = {
35
+ "input_image": "",
36
+ "input_mask": None,
37
+ "inpaint_additional_prompt": None,
38
+ }
39
+
40
+ img_prompt_params = {
41
+ "image_prompts": []
42
+ }
43
+
44
+ headers = {
45
+ "accept": "application/json"
46
+ }
47
+
48
+ imgs_base_path = os.path.join(os.path.dirname(__file__), 'imgs')
49
+
50
+ with open(os.path.join(imgs_base_path, "bear.jpg"), "rb") as f:
51
+ img1 = f.read()
52
+ image_base64 = base64.b64encode(img1).decode('utf-8')
54
+
55
+ with open(os.path.join(imgs_base_path, "s.jpg"), "rb") as f:
56
+ s = f.read()
57
+ s_base64 = base64.b64encode(s).decode('utf-8')
59
+
60
+ with open(os.path.join(imgs_base_path, "m.png"), "rb") as f:
61
+ m = f.read()
62
+ m_base64 = base64.b64encode(m).decode('utf-8')
64
+
65
+
66
+ def upscale_vary(image, params=upscale_params) -> dict:
67
+ """
68
+ Upscale or Vary
69
+ """
70
+ params["input_image"] = image
71
+ data = json.dumps(params)
72
+ response = requests.post(url=f"{cfg.fooocus_host}{cfg.img_upscale}",
73
+ data=data,
74
+ headers=headers,
75
+ timeout=300)
76
+ return response.json()
77
+
78
+
79
+ def inpaint_outpaint(input_image: str, input_mask: str = None, params=inpaint_params) -> dict:
80
+ """
81
+ Inpaint or Outpaint
82
+ """
83
+ params["input_image"] = input_image
84
+ params["input_mask"] = input_mask
85
+ params["outpaint_selections"] = ["Left", "Right"]
86
+ params["prompt"] = "cat"
87
+ data = json.dumps(params)
88
+ response = requests.post(url=f"{cfg.fooocus_host}{cfg.inpaint_outpaint}",
89
+ data=data,
90
+ headers=headers,
91
+ timeout=300)
92
+ return response.json()
93
+
94
+
95
+ def image_prompt(img_prompt: list, params: dict) -> dict:
96
+ """
97
+ Image Prompt
98
+ """
99
+ params["image_prompts"] = img_prompt
100
+ data = json.dumps(params)
101
+ response = requests.post(url=f"{cfg.fooocus_host}{cfg.img_prompt}",
102
+ data=data,
103
+ headers=headers,
104
+ timeout=300)
105
+ return response.json()
106
+
107
+
108
+ def image_prompt_with_inpaint(img_prompt: list, input_image: str, input_mask: str, params: dict) -> dict:
109
+ """
110
+ Image Prompt
111
+ """
112
+ params["image_prompts"] = img_prompt
113
+ params["input_image"] = input_image
114
+ params["input_mask"] = input_mask
115
+ params["outpaint_selections"] = ["Left", "Right"]
116
+ data = json.dumps(params)
117
+ response = requests.post(url=f"{cfg.fooocus_host}{cfg.img_prompt}",
118
+ data=data,
119
+ headers=headers,
120
+ timeout=300)
121
+ return response.json()
122
+
123
+
124
+ img_prompt = [
125
+ {
126
+ "cn_img": image_base64,
127
+ "cn_stop": 0.6,
128
+ "cn_weight": 0.6,
129
+ "cn_type": "ImagePrompt"
130
+ }
131
+ ]
132
+ # print(upscale_vary(image=image_base64))
133
+ # print(inpaint_outpaint(input_image=s_base64, input_mask=m_base64))
134
+ # print(image_prompt(img_prompt=img_prompt, params=img_prompt_params))
135
+ print(image_prompt_with_inpaint(img_prompt=img_prompt, input_image=s_base64, input_mask=m_base64, params=img_prompt_params))
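examples.py only exercises the synchronous endpoints; the request models above also accept a webhook_url for async jobs, which receives a POST on task completion or failure. A minimal receiver sketch using FastAPI (already a project dependency); the callback payload shape is not pinned down by the schema above, so this only logs it rather than parsing named fields:

from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/fooocus-webhook")
async def fooocus_webhook(request: Request):
    # The API only promises a POST on completion or failure, so capture the raw
    # JSON first and derive the field names from what your deployment sends.
    payload = await request.json()
    print("Fooocus-API callback:", payload)
    return {"ok": True}

# Run with: uvicorn receiver:app --port 9000  (assuming this file is saved as receiver.py),
# then pass "webhook_url": "http://<your-host>:9000/fooocus-webhook" in a generation request.
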
Fooocus-API/examples/imgs/bear.jpg ADDED
Fooocus-API/examples/imgs/m.png ADDED
Fooocus-API/examples/imgs/s.jpg ADDED
Fooocus-API/fooocus_api_version.py ADDED
@@ -0,0 +1 @@
1
+ version = '0.3.29'
Fooocus-API/fooocusapi/api.py ADDED
@@ -0,0 +1,390 @@
1
+ import uvicorn
2
+
3
+ from typing import List, Optional
4
+ from fastapi.responses import JSONResponse
5
+ from fastapi import Depends, FastAPI, Header, Query, Response, UploadFile
6
+ from fastapi.params import File
7
+ from fastapi.staticfiles import StaticFiles
8
+ from fastapi.middleware.cors import CORSMiddleware
9
+
10
+ from fooocusapi.args import args
11
+ from fooocusapi.sql_client import query_history
12
+ from fooocusapi.models import *
13
+ from fooocusapi.api_utils import generation_output, req_to_params
14
+ import fooocusapi.file_utils as file_utils
15
+ from fooocusapi.parameters import GenerationFinishReason, ImageGenerationResult
16
+ from fooocusapi.task_queue import TaskType
17
+ from fooocusapi.worker import process_generate, task_queue, process_top
18
+ from fooocusapi.models_v2 import *
19
+ from fooocusapi.img_utils import base64_to_stream, read_input_image
20
+
21
+ from concurrent.futures import ThreadPoolExecutor
22
+ from modules.util import HWC3
23
+
24
+ app = FastAPI()
25
+
26
+ app.add_middleware(
27
+ CORSMiddleware,
28
+ allow_origins=["*"], # Allow access from all sources
29
+ allow_credentials=True,
30
+ allow_methods=["*"], # Allow all HTTP methods
31
+ allow_headers=["*"], # Allow all request headers
32
+ )
33
+
34
+ work_executor = ThreadPoolExecutor(
35
+ max_workers=task_queue.queue_size * 2, thread_name_prefix="worker_")
36
+
37
+ img_generate_responses = {
38
+ "200": {
39
+ "description": "PNG bytes if request's 'Accept' header is 'image/png', otherwise JSON",
40
+ "content": {
41
+ "application/json": {
42
+ "example": [{
43
+ "base64": "...very long string...",
44
+ "seed": "1050625087",
45
+ "finish_reason": "SUCCESS"
46
+ }]
47
+ },
48
+ "application/json async": {
49
+ "example": {
50
+ "job_id": 1,
51
+ "job_type": "Text to Image"
52
+ }
53
+ },
54
+ "image/png": {
55
+ "example": "PNG bytes, what did you expect?"
56
+ }
57
+ }
58
+ }
59
+ }
60
+
61
+
62
+ def call_worker(req: Text2ImgRequest, accept: str):
63
+ task_type = TaskType.text_2_img
64
+ if isinstance(req, ImgUpscaleOrVaryRequest) or isinstance(req, ImgUpscaleOrVaryRequestJson):
65
+ task_type = TaskType.img_uov
66
+ elif isinstance(req, ImgPromptRequest) or isinstance(req, ImgPromptRequestJson):
67
+ task_type = TaskType.img_prompt
68
+ elif isinstance(req, ImgInpaintOrOutpaintRequest) or isinstance(req, ImgInpaintOrOutpaintRequestJson):
69
+ task_type = TaskType.img_inpaint_outpaint
70
+
71
+ params = req_to_params(req)
72
+ queue_task = task_queue.add_task(
73
+ task_type, {'params': params.__dict__, 'accept': accept, 'require_base64': req.require_base64},
74
+ webhook_url=req.webhook_url)
75
+
76
+ if queue_task is None:
77
+ print("[Task Queue] The task queue has reached limit")
78
+ results = [ImageGenerationResult(im=None, seed=0,
79
+ finish_reason=GenerationFinishReason.queue_is_full)]
80
+ elif req.async_process:
81
+ work_executor.submit(process_generate, queue_task, params)
82
+ results = queue_task
83
+ else:
84
+ results = process_generate(queue_task, params)
85
+
86
+ return results
87
+
88
+
89
+ def stop_worker():
90
+ process_top()
91
+
92
+
93
+ @app.get("/")
94
+ def home():
95
+ return Response(content='Swagger-UI to: <a href="/docs">/docs</a>', media_type="text/html")
96
+
97
+
98
+ @app.get("/ping", description="Returns a simple 'pong' response")
99
+ def ping():
100
+ return Response(content='pong', media_type="text/html")
101
+
102
+
103
+ @app.post("/v1/generation/text-to-image", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
104
+ def text2img_generation(req: Text2ImgRequest, accept: str = Header(None),
105
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
106
+ if accept_query is not None and len(accept_query) > 0:
107
+ accept = accept_query
108
+
109
+ if accept == 'image/png':
110
+ streaming_output = True
111
+ # image_number auto set to 1 in streaming mode
112
+ req.image_number = 1
113
+ else:
114
+ streaming_output = False
115
+
116
+ results = call_worker(req, accept)
117
+ return generation_output(results, streaming_output, req.require_base64)
118
+
119
+
120
+ @app.post("/v2/generation/text-to-image-with-ip", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
121
+ def text_to_img_with_ip(req: Text2ImgRequestWithPrompt,
122
+ accept: str = Header(None),
123
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
124
+ if accept_query is not None and len(accept_query) > 0:
125
+ accept = accept_query
126
+
127
+ if accept == 'image/png':
128
+ streaming_output = True
129
+ # image_number auto set to 1 in streaming mode
130
+ req.image_number = 1
131
+ else:
132
+ streaming_output = False
133
+
134
+ default_image_prompt = ImagePrompt(cn_img=None)
135
+ image_prompts_files: List[ImagePrompt] = []
136
+ for img_prompt in req.image_prompts:
137
+ img_prompt.cn_img = base64_to_stream(img_prompt.cn_img)
138
+ image = ImagePrompt(cn_img=img_prompt.cn_img,
139
+ cn_stop=img_prompt.cn_stop,
140
+ cn_weight=img_prompt.cn_weight,
141
+ cn_type=img_prompt.cn_type)
142
+ image_prompts_files.append(image)
143
+
144
+ while len(image_prompts_files) <= 4:
145
+ image_prompts_files.append(default_image_prompt)
146
+
147
+ req.image_prompts = image_prompts_files
148
+
149
+ results = call_worker(req, accept)
150
+ return generation_output(results, streaming_output, req.require_base64)
151
+
152
+
153
+ @app.post("/v1/generation/image-upscale-vary", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
154
+ def img_upscale_or_vary(input_image: UploadFile, req: ImgUpscaleOrVaryRequest = Depends(ImgUpscaleOrVaryRequest.as_form),
155
+ accept: str = Header(None),
156
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
157
+ if accept_query is not None and len(accept_query) > 0:
158
+ accept = accept_query
159
+
160
+ if accept == 'image/png':
161
+ streaming_output = True
162
+ # image_number auto set to 1 in streaming mode
163
+ req.image_number = 1
164
+ else:
165
+ streaming_output = False
166
+
167
+ results = call_worker(req, accept)
168
+ return generation_output(results, streaming_output, req.require_base64)
169
+
170
+
171
+ @app.post("/v2/generation/image-upscale-vary", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
172
+ def img_upscale_or_vary_v2(req: ImgUpscaleOrVaryRequestJson,
173
+ accept: str = Header(None),
174
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
175
+ if accept_query is not None and len(accept_query) > 0:
176
+ accept = accept_query
177
+
178
+ if accept == 'image/png':
179
+ streaming_output = True
180
+ # image_number auto set to 1 in streaming mode
181
+ req.image_number = 1
182
+ else:
183
+ streaming_output = False
184
+ req.input_image = base64_to_stream(req.input_image)
185
+
186
+ default_image_prompt = ImagePrompt(cn_img=None)
187
+ image_prompts_files: List[ImagePrompt] = []
188
+ for img_prompt in req.image_prompts:
189
+ img_prompt.cn_img = base64_to_stream(img_prompt.cn_img)
190
+ image = ImagePrompt(cn_img=img_prompt.cn_img,
191
+ cn_stop=img_prompt.cn_stop,
192
+ cn_weight=img_prompt.cn_weight,
193
+ cn_type=img_prompt.cn_type)
194
+ image_prompts_files.append(image)
195
+ while len(image_prompts_files) <= 4:
196
+ image_prompts_files.append(default_image_prompt)
197
+ req.image_prompts = image_prompts_files
198
+
199
+ results = call_worker(req, accept)
200
+ return generation_output(results, streaming_output, req.require_base64)
201
+
202
+
203
+ @app.post("/v1/generation/image-inpait-outpaint", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
204
+ def img_inpaint_or_outpaint(input_image: UploadFile, req: ImgInpaintOrOutpaintRequest = Depends(ImgInpaintOrOutpaintRequest.as_form),
205
+ accept: str = Header(None),
206
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
207
+ if accept_query is not None and len(accept_query) > 0:
208
+ accept = accept_query
209
+
210
+ if accept == 'image/png':
211
+ streaming_output = True
212
+ # image_number auto set to 1 in streaming mode
213
+ req.image_number = 1
214
+ else:
215
+ streaming_output = False
216
+
217
+ results = call_worker(req, accept)
218
+ return generation_output(results, streaming_output, req.require_base64)
219
+
220
+
221
+ @app.post("/v2/generation/image-inpait-outpaint", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
222
+ def img_inpaint_or_outpaint_v2(req: ImgInpaintOrOutpaintRequestJson,
223
+ accept: str = Header(None),
224
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
225
+ if accept_query is not None and len(accept_query) > 0:
226
+ accept = accept_query
227
+
228
+ if accept == 'image/png':
229
+ streaming_output = True
230
+ # image_number auto set to 1 in streaming mode
231
+ req.image_number = 1
232
+ else:
233
+ streaming_output = False
234
+
235
+ req.input_image = base64_to_stream(req.input_image)
236
+ if req.input_mask is not None:
237
+ req.input_mask = base64_to_stream(req.input_mask)
238
+ default_image_prompt = ImagePrompt(cn_img=None)
239
+ image_prompts_files: List[ImagePrompt] = []
240
+ for img_prompt in req.image_prompts:
241
+ img_prompt.cn_img = base64_to_stream(img_prompt.cn_img)
242
+ image = ImagePrompt(cn_img=img_prompt.cn_img,
243
+ cn_stop=img_prompt.cn_stop,
244
+ cn_weight=img_prompt.cn_weight,
245
+ cn_type=img_prompt.cn_type)
246
+ image_prompts_files.append(image)
247
+ while len(image_prompts_files) <= 4:
248
+ image_prompts_files.append(default_image_prompt)
249
+ req.image_prompts = image_prompts_files
250
+
251
+ results = call_worker(req, accept)
252
+ return generation_output(results, streaming_output, req.require_base64)
253
+
254
+
255
+ @app.post("/v1/generation/image-prompt", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
256
+ def img_prompt(cn_img1: Optional[UploadFile] = File(None),
257
+ req: ImgPromptRequest = Depends(ImgPromptRequest.as_form),
258
+ accept: str = Header(None),
259
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
260
+ if accept_query is not None and len(accept_query) > 0:
261
+ accept = accept_query
262
+
263
+ if accept == 'image/png':
264
+ streaming_output = True
265
+ # image_number auto set to 1 in streaming mode
266
+ req.image_number = 1
267
+ else:
268
+ streaming_output = False
269
+
270
+ results = call_worker(req, accept)
271
+ return generation_output(results, streaming_output, req.require_base64)
272
+
273
+
274
+ @app.post("/v2/generation/image-prompt", response_model=List[GeneratedImageResult] | AsyncJobResponse, responses=img_generate_responses)
275
+ def img_prompt_v2(req: ImgPromptRequestJson,
276
+ accept: str = Header(None),
277
+ accept_query: str | None = Query(None, alias='accept', description="Parameter to override the 'Accept' header, 'image/png' for output bytes")):
278
+ if accept_query is not None and len(accept_query) > 0:
279
+ accept = accept_query
280
+
281
+ if accept == 'image/png':
282
+ streaming_output = True
283
+ # image_number auto set to 1 in streaming mode
284
+ req.image_number = 1
285
+ else:
286
+ streaming_output = False
287
+
288
+ if req.input_image is not None:
289
+ req.input_image = base64_to_stream(req.input_image)
290
+ if req.input_mask is not None:
291
+ req.input_mask = base64_to_stream(req.input_mask)
292
+
293
+ default_image_prompt = ImagePrompt(cn_img=None)
294
+ image_prompts_files: List[ImagePrompt] = []
295
+ for img_prompt in req.image_prompts:
296
+ img_prompt.cn_img = base64_to_stream(img_prompt.cn_img)
297
+ image = ImagePrompt(cn_img=img_prompt.cn_img,
298
+ cn_stop=img_prompt.cn_stop,
299
+ cn_weight=img_prompt.cn_weight,
300
+ cn_type=img_prompt.cn_type)
301
+ image_prompts_files.append(image)
302
+
303
+ while len(image_prompts_files) <= 4:
304
+ image_prompts_files.append(default_image_prompt)
305
+
306
+ req.image_prompts = image_prompts_files
307
+
308
+ results = call_worker(req, accept)
309
+ return generation_output(results, streaming_output, req.require_base64)
310
+
311
+
312
+ @app.get("/v1/generation/query-job", response_model=AsyncJobResponse, description="Query async generation job")
313
+ def query_job(req: QueryJobRequest = Depends()):
314
+ queue_task = task_queue.get_task(req.job_id, True)
315
+ if queue_task is None:
316
+ return JSONResponse(content=AsyncJobResponse(job_id="",
317
+ job_type="Not Found",
318
+ job_stage="ERROR",
319
+ job_progress=0,
320
+ job_status="Job not found"), status_code=404)
321
+
322
+ return generation_output(queue_task, streaming_output=False, require_base64=False,
323
+ require_step_preivew=req.require_step_preivew)
324
+
325
+
326
+ @app.get("/v1/generation/job-queue", response_model=JobQueueInfo, description="Query job queue info")
327
+ def job_queue():
328
+ return JobQueueInfo(running_size=len(task_queue.queue), finished_size=len(task_queue.history), last_job_id=task_queue.last_job_id)
329
+
330
+
331
+ @app.get("/v1/generation/job-history", response_model=JobHistoryResponse | dict, description="Query historical job data")
332
+ def get_history(job_id: str = None, page: int = 0, page_size: int = 20):
333
+ # Fetch and return the historical tasks
334
+ queue = [JobHistoryInfo(job_id=item.job_id, is_finished=item.is_finished) for item in task_queue.queue]
335
+ if not args.presistent:
336
+ history = [JobHistoryInfo(job_id=item.job_id, is_finished=item.is_finished) for item in task_queue.history]
337
+ return JobHistoryResponse(history=history, queue=queue)
338
+ else:
339
+ history = query_history(task_id=job_id, page=page, page_size=page_size)
340
+ return {
341
+ "history": history,
342
+ "queue": queue
343
+ }
344
+
345
+
346
+ @app.post("/v1/generation/stop", response_model=StopResponse, description="Job stoping")
347
+ def stop():
348
+ stop_worker()
349
+ return StopResponse(msg="success")
350
+
351
+
352
+ @app.post("/v1/tools/describe-image", response_model=DescribeImageResponse)
353
+ def describe_image(image: UploadFile, type: DescribeImageType = Query(DescribeImageType.photo, description="Image type, 'Photo' or 'Anime'")):
354
+ if type == DescribeImageType.photo:
355
+ from extras.interrogate import default_interrogator as default_interrogator_photo
356
+ interrogator = default_interrogator_photo
357
+ else:
358
+ from extras.wd14tagger import default_interrogator as default_interrogator_anime
359
+ interrogator = default_interrogator_anime
360
+ img = HWC3(read_input_image(image))
361
+ result = interrogator(img)
362
+ return DescribeImageResponse(describe=result)
363
+
364
+
365
+ @app.get("/v1/engines/all-models", response_model=AllModelNamesResponse, description="Get all filenames of base model and lora")
366
+ def all_models():
367
+ import modules.config as config
368
+ return AllModelNamesResponse(model_filenames=config.model_filenames, lora_filenames=config.lora_filenames)
369
+
370
+
371
+ @app.post("/v1/engines/refresh-models", response_model=AllModelNamesResponse, description="Refresh local files and get all filenames of base model and lora")
372
+ def refresh_models():
373
+ import modules.config as config
374
+ config.update_all_model_names()
375
+ return AllModelNamesResponse(model_filenames=config.model_filenames, lora_filenames=config.lora_filenames)
376
+
377
+
378
+ @app.get("/v1/engines/styles", response_model=List[str], description="Get all legal Fooocus styles")
379
+ def all_styles():
380
+ from modules.sdxl_styles import legal_style_names
381
+ return legal_style_names
382
+
383
+
384
+ app.mount("/files", StaticFiles(directory=file_utils.output_dir), name="files")
385
+
386
+
387
+ def start_app(args):
388
+ file_utils.static_serve_base_url = args.base_url + "/files/"
389
+ uvicorn.run("fooocusapi.api:app", host=args.host,
390
+ port=args.port, log_level=args.log_level)
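All generation endpoints above accept either an Accept: image/png header or an accept=image/png query parameter (the query value wins), and force image_number to 1 in that mode so the response body is a single raw PNG. A minimal sketch of fetching bytes directly, relying only on the behavior visible in the handlers above and on the default host/port from the examples:

import requests

host = "http://127.0.0.1:8888"

resp = requests.post(
    f"{host}/v1/generation/text-to-image",
    params={"accept": "image/png"},  # query parameter overrides the Accept header
    json={"prompt": "a lighthouse at dusk"},
    timeout=300,
)

# In streaming mode the response body is the PNG itself, not JSON.
with open("result.png", "wb") as f:
    f.write(resp.content)
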
Fooocus-API/fooocusapi/api_utils.py ADDED
@@ -0,0 +1,213 @@
1
+ from typing import List
2
+
3
+ from fastapi import Response
4
+ from fooocusapi.file_utils import get_file_serve_url, output_file_to_base64img, output_file_to_bytesimg
5
+ from fooocusapi.img_utils import read_input_image
6
+ from fooocusapi.models import AsyncJobResponse, AsyncJobStage, GeneratedImageResult, GenerationFinishReason, ImgInpaintOrOutpaintRequest, ImgPromptRequest, ImgUpscaleOrVaryRequest, Text2ImgRequest
7
+ from fooocusapi.models_v2 import *
8
+ from fooocusapi.parameters import ImageGenerationParams, ImageGenerationResult, default_inpaint_engine_version, default_sampler, default_scheduler, default_base_model_name, default_refiner_model_name
9
+ from fooocusapi.task_queue import QueueTask
10
+
11
+ from modules import flags
12
+ from modules import config
13
+ from modules.sdxl_styles import legal_style_names
14
+
15
+
16
+ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
17
+ if req.base_model_name is not None:
18
+ if req.base_model_name not in config.model_filenames:
19
+ print(f"[Warning] Wrong base_model_name input: {req.base_model_name}, using default")
20
+ req.base_model_name = default_base_model_name
21
+
22
+ if req.refiner_model_name is not None and req.refiner_model_name != 'None':
23
+ if req.refiner_model_name not in config.model_filenames:
24
+ print(f"[Warning] Wrong refiner_model_name input: {req.refiner_model_name}, using default")
25
+ req.refiner_model_name = default_refiner_model_name
26
+
27
+ for lora in req.loras:
28
+ if lora.model_name != 'None' and lora.model_name not in config.lora_filenames:
29
+ print(f"[Warning] Wrong lora model_name input: {lora.model_name}, using 'None'")
30
+ lora.model_name = 'None'
31
+
32
+ prompt = req.prompt
33
+ negative_prompt = req.negative_prompt
34
+ style_selections = [
35
+ s for s in req.style_selections if s in legal_style_names]
36
+ performance_selection = req.performance_selection.value
37
+ aspect_ratios_selection = req.aspect_ratios_selection
38
+ image_number = req.image_number
39
+ image_seed = None if req.image_seed == -1 else req.image_seed
40
+ sharpness = req.sharpness
41
+ guidance_scale = req.guidance_scale
42
+ base_model_name = req.base_model_name
43
+ refiner_model_name = req.refiner_model_name
44
+ refiner_switch = req.refiner_switch
45
+ loras = [(lora.model_name, lora.weight) for lora in req.loras]
46
+ uov_input_image = None
47
+ if not isinstance(req, Text2ImgRequestWithPrompt):
48
+ if isinstance(req, (ImgUpscaleOrVaryRequest, ImgUpscaleOrVaryRequestJson)):
49
+ uov_input_image = read_input_image(req.input_image)
50
+ uov_method = req.uov_method.value if isinstance(
51
+ req, (ImgUpscaleOrVaryRequest, ImgUpscaleOrVaryRequestJson)) else flags.disabled
52
+ upscale_value = req.upscale_value if isinstance(
53
+ req, (ImgUpscaleOrVaryRequest, ImgUpscaleOrVaryRequestJson)) else None
54
+ outpaint_selections = [] if not isinstance(
55
+ req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) else [
56
+ s.value for s in req.outpaint_selections]
57
+ outpaint_distance_left = None if not isinstance(
58
+ req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) else req.outpaint_distance_left
59
+ outpaint_distance_right = None if not isinstance(
60
+ req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) else req.outpaint_distance_right
61
+ outpaint_distance_top = None if not isinstance(
62
+ req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) else req.outpaint_distance_top
63
+ outpaint_distance_bottom = None if not isinstance(
64
+ req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) else req.outpaint_distance_bottom
65
+
66
+ if refiner_model_name == '':
67
+ refiner_model_name = 'None'
68
+
69
+ inpaint_input_image = None
70
+ inpaint_additional_prompt = None
71
+ if isinstance(req, (ImgInpaintOrOutpaintRequest, ImgInpaintOrOutpaintRequestJson)) and req.input_image is not None:
72
+ inpaint_additional_prompt = req.inpaint_additional_prompt
73
+ input_image = read_input_image(req.input_image)
74
+ input_mask = None
75
+ if req.input_mask is not None:
76
+ input_mask = read_input_image(req.input_mask)
77
+ inpaint_input_image = {
78
+ 'image': input_image,
79
+ 'mask': input_mask
80
+ }
81
+
82
+ image_prompts = []
83
+ if isinstance(req, (ImgPromptRequest, ImgPromptRequestJson, Text2ImgRequestWithPrompt, ImgUpscaleOrVaryRequestJson, ImgInpaintOrOutpaintRequestJson)):
84
+ # Auto-enable the mixing flags when image prompts are combined with vary/upscale or inpaint
85
+ if len(req.image_prompts) > 0 and uov_input_image is not None:
86
+ print("[INFO] Mixing image prompt and vary upscale is set to True")
87
+ req.advanced_params.mixing_image_prompt_and_vary_upscale = True
88
+ elif len(req.image_prompts) > 0 and not isinstance(req, Text2ImgRequestWithPrompt) and req.input_image is not None and req.advanced_params is not None:
89
+ print("[INFO] Mixing image prompt and inpaint is set to True")
90
+ req.advanced_params.mixing_image_prompt_and_inpaint = True
91
+
92
+ for img_prompt in req.image_prompts:
93
+ if img_prompt.cn_img is not None:
94
+ cn_img = read_input_image(img_prompt.cn_img)
95
+ if img_prompt.cn_stop is None or img_prompt.cn_stop == 0:
96
+ img_prompt.cn_stop = flags.default_parameters[img_prompt.cn_type.value][0]
97
+ if img_prompt.cn_weight is None or img_prompt.cn_weight == 0:
98
+ img_prompt.cn_weight = flags.default_parameters[img_prompt.cn_type.value][1]
99
+ image_prompts.append(
100
+ (cn_img, img_prompt.cn_stop, img_prompt.cn_weight, img_prompt.cn_type.value))
101
+
102
+ advanced_params = None
103
+ if req.advanced_params is not None:
104
+ adp = req.advanced_params
105
+
106
+ if adp.refiner_swap_method not in ['joint', 'separate', 'vae']:
107
+ print(f"[Warning] Wrong refiner_swap_method input: {adp.refiner_swap_method}, using default")
108
+ adp.refiner_swap_method = 'joint'
109
+
110
+ if adp.sampler_name not in flags.sampler_list:
111
+ print(f"[Warning] Wrong sampler_name input: {adp.sampler_name}, using default")
112
+ adp.sampler_name = default_sampler
113
+
114
+ if adp.scheduler_name not in flags.scheduler_list:
115
+ print(f"[Warning] Wrong scheduler_name input: {adp.scheduler_name}, using default")
116
+ adp.scheduler_name = default_scheduler
117
+
118
+ if adp.inpaint_engine not in flags.inpaint_engine_versions:
119
+ print(f"[Warning] Wrong inpaint_engine input: {adp.inpaint_engine}, using default")
120
+ adp.inpaint_engine = default_inpaint_engine_version
121
+
122
+ advanced_params = [
123
+ adp.disable_preview, adp.adm_scaler_positive, adp.adm_scaler_negative, adp.adm_scaler_end, adp.adaptive_cfg, adp.sampler_name, \
124
+ adp.scheduler_name, False, adp.overwrite_step, adp.overwrite_switch, adp.overwrite_width, adp.overwrite_height, \
125
+ adp.overwrite_vary_strength, adp.overwrite_upscale_strength, \
126
+ adp.mixing_image_prompt_and_vary_upscale, adp.mixing_image_prompt_and_inpaint, \
127
+ adp.debugging_cn_preprocessor, adp.skipping_cn_preprocessor, adp.controlnet_softness, adp.canny_low_threshold, adp.canny_high_threshold, \
128
+ adp.refiner_swap_method, \
129
+ adp.freeu_enabled, adp.freeu_b1, adp.freeu_b2, adp.freeu_s1, adp.freeu_s2, \
130
+ adp.debugging_inpaint_preprocessor, adp.inpaint_disable_initial_latent, adp.inpaint_engine, adp.inpaint_strength, adp.inpaint_respective_field, \
131
+ False, adp.invert_mask_checkbox, adp.inpaint_erode_or_dilate
132
+ ]
133
+
134
+ return ImageGenerationParams(prompt=prompt,
135
+ negative_prompt=negative_prompt,
136
+ style_selections=style_selections,
137
+ performance_selection=performance_selection,
138
+ aspect_ratios_selection=aspect_ratios_selection,
139
+ image_number=image_number,
140
+ image_seed=image_seed,
141
+ sharpness=sharpness,
142
+ guidance_scale=guidance_scale,
143
+ base_model_name=base_model_name,
144
+ refiner_model_name=refiner_model_name,
145
+ refiner_switch=refiner_switch,
146
+ loras=loras,
147
+ uov_input_image=uov_input_image,
148
+ uov_method=uov_method,
149
+ upscale_value=upscale_value,
150
+ outpaint_selections=outpaint_selections,
151
+ outpaint_distance_left=outpaint_distance_left,
152
+ outpaint_distance_right=outpaint_distance_right,
153
+ outpaint_distance_top=outpaint_distance_top,
154
+ outpaint_distance_bottom=outpaint_distance_bottom,
155
+ inpaint_input_image=inpaint_input_image,
156
+ inpaint_additional_prompt=inpaint_additional_prompt,
157
+ image_prompts=image_prompts,
158
+ advanced_params=advanced_params,
159
+ )
160
+
161
+
162
+ def generation_output(results: QueueTask | List[ImageGenerationResult], streaming_output: bool, require_base64: bool, require_step_preivew: bool=False) -> Response | List[GeneratedImageResult] | AsyncJobResponse:
163
+ if isinstance(results, QueueTask):
164
+ task = results
165
+ job_stage = AsyncJobStage.running
166
+ job_result = None
167
+ if task.start_millis == 0:
168
+ job_stage = AsyncJobStage.waiting
169
+ if task.is_finished:
170
+ if task.finish_with_error:
171
+ job_stage = AsyncJobStage.error
172
+ else:
173
+ if task.task_result is not None:
174
+ job_stage = AsyncJobStage.success
175
+ task_result_require_base64 = False
176
+ if 'require_base64' in task.req_param and task.req_param['require_base64']:
177
+ task_result_require_base64 = True
178
+
179
+ job_result = generation_output(task.task_result, False, task_result_require_base64)
180
+ job_step_preview = None if not require_step_preivew else task.task_step_preview
181
+ return AsyncJobResponse(job_id=task.job_id,
182
+ job_type=task.type,
183
+ job_stage=job_stage,
184
+ job_progress=task.finish_progress,
185
+ job_status=task.task_status,
186
+ job_step_preview=job_step_preview,
187
+ job_result=job_result)
188
+
189
+ if streaming_output:
190
+ if len(results) == 0:
191
+ return Response(status_code=500)
192
+ result = results[0]
193
+ if result.finish_reason == GenerationFinishReason.queue_is_full:
194
+ return Response(status_code=409, content=result.finish_reason.value)
195
+ elif result.finish_reason == GenerationFinishReason.user_cancel:
196
+ return Response(status_code=400, content=result.finish_reason.value)
197
+ elif result.finish_reason == GenerationFinishReason.error:
198
+ return Response(status_code=500, content=result.finish_reason.value)
199
+
200
+ img_bytes = output_file_to_bytesimg(result.im)
201
+ return Response(img_bytes, media_type='image/png')
202
+ else:
203
+ results = [GeneratedImageResult(
204
+ base64=output_file_to_base64img(
205
+ item.im) if require_base64 else None,
206
+ url=get_file_serve_url(item.im),
207
+ seed=item.seed,
208
+ finish_reason=item.finish_reason) for item in results]
209
+ return results
210
+
211
+
212
+ class QueueReachLimitException(Exception):
213
+ pass
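
To illustrate the three output shapes of generation_output above, a minimal sketch; the filename 'example.png' is hypothetical (no real output file is assumed) and both calls take the successful-result path.

from fooocusapi.parameters import ImageGenerationResult, GenerationFinishReason

# One successful result pointing at a (hypothetical) saved output file
results = [ImageGenerationResult(im='example.png', seed='12345',
                                 finish_reason=GenerationFinishReason.success)]

# JSON-style output: a list of GeneratedImageResult with static serve URLs
json_output = generation_output(results, streaming_output=False, require_base64=False)

# Streaming output: a fastapi.Response carrying the raw PNG bytes
png_response = generation_output(results, streaming_output=True, require_base64=False)
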
Fooocus-API/fooocusapi/args.py ADDED
@@ -0,0 +1,17 @@
1
+ from fooocusapi.base_args import add_base_args
2
+ import ldm_patched.modules.args_parser as args_parser
3
+
4
+ # Add Fooocus-API args to parser
5
+ add_base_args(args_parser.parser, False)
6
+
7
+ # Apply Fooocus's args
8
+ from args_manager import args_parser
9
+
10
+ # Override the port default value
11
+ args_parser.parser.set_defaults(
12
+ port=8888
13
+ )
14
+
15
+ # Parse the args again so the overridden defaults take effect
16
+ args_parser.args = args_parser.parser.parse_args()
17
+ args = args_parser.args
Fooocus-API/fooocusapi/base_args.py ADDED
@@ -0,0 +1,17 @@
1
+ from argparse import ArgumentParser
2
+
3
+
4
+ def add_base_args(parser: ArgumentParser, before_prepared: bool):
5
+ if before_prepared:
6
+ parser.add_argument("--port", type=int, default=8888, help="Set the listen port, default: 8888")
7
+
8
+ parser.add_argument("--host", type=str, default='127.0.0.1', help="Set the listen host, default: 127.0.0.1")
9
+ parser.add_argument("--base-url", type=str, default=None, help="Set base url for outside visit, default is http://host:port")
10
+ parser.add_argument("--log-level", type=str, default='info', help="Log info for Uvicorn, default: info")
11
+ parser.add_argument("--sync-repo", default=None, help="Sync dependent git repositories to local, 'skip' for skip sync action, 'only' for only do the sync action and not launch app")
12
+ parser.add_argument("--skip-pip", default=False, action="store_true", help="Skip automatic pip install when setup")
13
+ parser.add_argument("--preload-pipeline", default=False, action="store_true", help="Preload pipeline before start http server")
14
+ parser.add_argument("--queue-size", type=int, default=3, help="Working queue size, default: 3, generation requests exceeding working queue size will return failure")
15
+ parser.add_argument("--queue-history", type=int, default=0, help="Finished jobs reserve size, tasks exceeding the limit will be deleted, including output image files, default: 0, means no limit")
16
+ parser.add_argument('--webhook-url', type=str, default=None, help='The URL to send a POST request when a job is finished')
17
+ parser.add_argument('--presistent', default=False, action="store_true", help="Store task history to the database")
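
A minimal sketch of using add_base_args on a standalone parser; passing before_prepared=True registers --port here (matching the default of 8888 that args.py applies):

from argparse import ArgumentParser

parser = ArgumentParser()
add_base_args(parser, before_prepared=True)
args = parser.parse_args(['--host', '0.0.0.0', '--port', '8888'])
print(args.host, args.port, args.queue_size)  # -> 0.0.0.0 8888 3
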
Fooocus-API/fooocusapi/file_utils.py ADDED
@@ -0,0 +1,70 @@
1
+ import base64
2
+ import datetime
3
+ from io import BytesIO
4
+ import os
5
+ import numpy as np
6
+ from PIL import Image
7
+ import uuid
8
+
9
+ output_dir = os.path.abspath(os.path.join(
10
+ os.path.dirname(__file__), '..', 'outputs', 'files'))
11
+ os.makedirs(output_dir, exist_ok=True)
12
+
13
+ static_serve_base_url = 'http://127.0.0.1:8888/files/'
14
+
15
+
16
+ def save_output_file(img: np.ndarray) -> str:
17
+ current_time = datetime.datetime.now()
18
+ date_string = current_time.strftime("%Y-%m-%d")
19
+
20
+ filename = os.path.join(date_string, str(uuid.uuid4()) + '.png')
21
+ file_path = os.path.join(output_dir, filename)
22
+
23
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
24
+ Image.fromarray(img).save(file_path)
25
+ return filename
26
+
27
+
28
+ def delete_output_file(filename: str):
29
+ file_path = os.path.join(output_dir, filename)
30
+ if not os.path.exists(file_path) or not os.path.isfile(file_path):
31
+ return
32
+ try:
33
+ os.remove(file_path)
34
+ except OSError:
35
+ print(f"Delete output file failed: {filename}")
36
+
37
+
38
+ def output_file_to_base64img(filename: str | None) -> str | None:
39
+ if filename is None:
40
+ return None
41
+ file_path = os.path.join(output_dir, filename)
42
+ if not os.path.exists(file_path) or not os.path.isfile(file_path):
43
+ return None
44
+
45
+ img = Image.open(file_path)
46
+ output_buffer = BytesIO()
47
+ img.save(output_buffer, format='PNG')
48
+ byte_data = output_buffer.getvalue()
49
+ base64_str = base64.b64encode(byte_data)
50
+ return base64_str
51
+
52
+
53
+ def output_file_to_bytesimg(filename: str | None) -> bytes | None:
54
+ if filename is None:
55
+ return None
56
+ file_path = os.path.join(output_dir, filename)
57
+ if not os.path.exists(file_path) or not os.path.isfile(file_path):
58
+ return None
59
+
60
+ img = Image.open(file_path)
61
+ output_buffer = BytesIO()
62
+ img.save(output_buffer, format='PNG')
63
+ byte_data = output_buffer.getvalue()
64
+ return byte_data
65
+
66
+
67
+ def get_file_serve_url(filename: str | None) -> str | None:
68
+ if filename is None:
69
+ return None
70
+ return static_serve_base_url + filename.replace('\\', '/')
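
A minimal round-trip sketch for the helpers above: save an array as an output file, resolve its serve URL and base64 form, then clean up (the all-black image is just a placeholder):

import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)   # placeholder black image
filename = save_output_file(img)              # e.g. '2024-01-01/<uuid>.png'
print(get_file_serve_url(filename))           # http://127.0.0.1:8888/files/...
b64 = output_file_to_base64img(filename)      # PNG re-encoded as base64
delete_output_file(filename)                  # remove the example file
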
Fooocus-API/fooocusapi/img_utils.py ADDED
@@ -0,0 +1,70 @@
1
+ import base64
2
+ import requests
3
+ import numpy as np
4
+
5
+ from io import BytesIO
6
+ from fastapi import UploadFile
7
+ from PIL import Image
8
+
9
+
10
+ def narray_to_base64img(narray: np.ndarray | None) -> str | None:
11
+ if narray is None:
12
+ return None
13
+
14
+ img = Image.fromarray(narray)
15
+ output_buffer = BytesIO()
16
+ img.save(output_buffer, format='PNG')
17
+ byte_data = output_buffer.getvalue()
18
+ base64_str = base64.b64encode(byte_data).decode('utf-8')
19
+ return base64_str
20
+
21
+
22
+ def narray_to_bytesimg(narray: np.ndarray | None) -> bytes | None:
23
+ if narray is None:
24
+ return None
25
+
26
+ img = Image.fromarray(narray)
27
+ output_buffer = BytesIO()
28
+ img.save(output_buffer, format='PNG')
29
+ byte_data = output_buffer.getvalue()
30
+ return byte_data
31
+
32
+
33
+ def read_input_image(input_image: UploadFile | None) -> np.ndarray | None:
34
+ if input_image is None:
35
+ return None
36
+ input_image_bytes = input_image.file.read()
37
+ pil_image = Image.open(BytesIO(input_image_bytes))
38
+ image = np.array(pil_image)
39
+ return image
40
+
41
+ def base64_to_stream(image: str) -> UploadFile | None:
42
+ if image == '':
43
+ return None
44
+ if image.startswith('http'):
45
+ return get_check_image(url=image)
46
+ if image.startswith('data:image'):
47
+ image = image.split(sep=',', maxsplit=1)[1]
48
+ image_bytes = base64.b64decode(image)
49
+ byte_stream = BytesIO()
50
+ byte_stream.write(image_bytes)
51
+ byte_stream.seek(0)
52
+ return UploadFile(file=byte_stream)
53
+
54
+ def get_check_image(url: str) -> UploadFile | None:
55
+ if url == '':
56
+ return None
57
+ try:
58
+ response = requests.get(url, timeout=10)
59
+ binary_image = response.content
60
+ except Exception:
61
+ return None
62
+ try:
63
+ buffer = BytesIO(binary_image)
64
+ Image.open(buffer)
65
+ except Exception:
66
+ return None
67
+ byte_stream = BytesIO()
68
+ byte_stream.write(binary_image)
69
+ byte_stream.seek(0)
70
+ return UploadFile(file=byte_stream)
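
A minimal round-trip sketch: encode an array with narray_to_base64img, wrap it as an upload stream via base64_to_stream, and read it back with read_input_image (PNG is lossless, so the pixels survive):

import numpy as np

src = np.full((8, 8, 3), 255, dtype=np.uint8)   # tiny white image
b64 = narray_to_base64img(src)                  # PNG bytes as a base64 string
upload = base64_to_stream('data:image/png;base64,' + b64)
restored = read_input_image(upload)
assert (restored == src).all()
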
Fooocus-API/fooocusapi/models.py ADDED
@@ -0,0 +1,449 @@
1
+ from fastapi import Form, UploadFile
2
+ from fastapi.params import File
3
+ from fastapi.exceptions import RequestValidationError
4
+
5
+ from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, ValidationError
6
+ from pydantic_core import InitErrorDetails
7
+
8
+ from typing import List, Tuple
9
+ from enum import Enum
10
+
11
+ from fooocusapi.parameters import (GenerationFinishReason,
12
+ default_styles,
13
+ default_base_model_name,
14
+ default_refiner_model_name,
15
+ default_refiner_switch,
16
+ default_loras,
17
+ default_cfg_scale,
18
+ default_prompt_negative,
19
+ default_aspect_ratio,
20
+ default_sampler,
21
+ default_scheduler)
22
+
23
+ from fooocusapi.task_queue import TaskType
24
+
25
+
26
+ class Lora(BaseModel):
27
+ model_name: str
28
+ weight: float = Field(default=0.5, ge=-2, le=2)
29
+
30
+ model_config = ConfigDict(
31
+ protected_namespaces=('protect_me_', 'also_protect_')
32
+ )
33
+
34
+
35
+ LoraList = TypeAdapter(List[Lora])
36
+ default_loras_model = [Lora(model_name=lora[0], weight=lora[1]) for lora in default_loras if lora[0] != 'None']
37
+ default_loras_json = LoraList.dump_json(default_loras_model)
38
+
39
+
40
+ class PerfomanceSelection(str, Enum):
41
+ speed = 'Speed'
42
+ quality = 'Quality'
43
+ extreme_speed = 'Extreme Speed'
44
+
45
+
46
+ class UpscaleOrVaryMethod(str, Enum):
47
+ subtle_variation = 'Vary (Subtle)'
48
+ strong_variation = 'Vary (Strong)'
49
+ upscale_15 = 'Upscale (1.5x)'
50
+ upscale_2 = 'Upscale (2x)'
51
+ upscale_fast = 'Upscale (Fast 2x)'
52
+ upscale_custom = 'Upscale (Custom)'
53
+
54
+ class OutpaintExpansion(str, Enum):
55
+ left = 'Left'
56
+ right = 'Right'
57
+ top = 'Top'
58
+ bottom = 'Bottom'
59
+
60
+
61
+ class ControlNetType(str, Enum):
62
+ cn_ip = "ImagePrompt"
63
+ cn_ip_face = "FaceSwap"
64
+ cn_canny = "PyraCanny"
65
+ cn_cpds = "CPDS"
66
+
67
+
68
+ class ImagePrompt(BaseModel):
69
+ cn_img: UploadFile | None = Field(default=None)
70
+ cn_stop: float | None = Field(default=None, ge=0, le=1)
71
+ cn_weight: float | None = Field(default=None, ge=0, le=2, description="None for default value")
72
+ cn_type: ControlNetType = Field(default=ControlNetType.cn_ip)
73
+
74
+
75
+ class AdvancedParams(BaseModel):
76
+ disable_preview: bool = Field(False, description="Disable preview during generation")
77
+ adm_scaler_positive: float = Field(1.5, description="Positive ADM Guidance Scaler", ge=0.1, le=3.0)
78
+ adm_scaler_negative: float = Field(0.8, description="Negative ADM Guidance Scaler", ge=0.1, le=3.0)
79
+ adm_scaler_end: float = Field(0.3, description="ADM Guidance End At Step", ge=0.0, le=1.0)
80
+ refiner_swap_method: str = Field('joint', description="Refiner swap method")
81
+ adaptive_cfg: float = Field(7.0, description="CFG Mimicking from TSNR", ge=1.0, le=30.0)
82
+ sampler_name: str = Field(default_sampler, description="Sampler")
83
+ scheduler_name: str = Field(default_scheduler, description="Scheduler")
84
+ overwrite_step: int = Field(-1, description="Forced Overwrite of Sampling Step", ge=-1, le=200)
85
+ overwrite_switch: int = Field(-1, description="Forced Overwrite of Refiner Switch Step", ge=-1, le=200)
86
+ overwrite_width: int = Field(-1, description="Forced Overwrite of Generating Width", ge=-1, le=2048)
87
+ overwrite_height: int = Field(-1, description="Forced Overwrite of Generating Height", ge=-1, le=2048)
88
+ overwrite_vary_strength: float = Field(-1, description='Forced Overwrite of Denoising Strength of "Vary"', ge=-1, le=1.0)
89
+ overwrite_upscale_strength: float = Field(-1, description='Forced Overwrite of Denoising Strength of "Upscale"', ge=-1, le=1.0)
90
+ mixing_image_prompt_and_vary_upscale: bool = Field(False, description="Mixing Image Prompt and Vary/Upscale")
91
+ mixing_image_prompt_and_inpaint: bool = Field(False, description="Mixing Image Prompt and Inpaint")
92
+ debugging_cn_preprocessor: bool = Field(False, description="Debug Preprocessors")
93
+ skipping_cn_preprocessor: bool = Field(False, description="Skip Preprocessors")
94
+ controlnet_softness: float = Field(0.25, description="Softness of ControlNet", ge=0.0, le=1.0)
95
+ canny_low_threshold: int = Field(64, description="Canny Low Threshold", ge=1, le=255)
96
+ canny_high_threshold: int = Field(128, description="Canny High Threshold", ge=1, le=255)
97
+ freeu_enabled: bool = Field(False, description="FreeU enabled")
98
+ freeu_b1: float = Field(1.01, description="FreeU B1")
99
+ freeu_b2: float = Field(1.02, description="FreeU B2")
100
+ freeu_s1: float = Field(0.99, description="FreeU B3")
101
+ freeu_s2: float = Field(0.95, description="FreeU B4")
102
+ debugging_inpaint_preprocessor: bool = Field(False, description="Debug Inpaint Preprocessing")
103
+ inpaint_disable_initial_latent: bool = Field(False, description="Disable initial latent in inpaint")
104
+ inpaint_engine: str = Field('v1', description="Inpaint Engine")
105
+ inpaint_strength: float = Field(1.0, description="Inpaint Denoising Strength", ge=0.0, le=1.0)
106
+ inpaint_respective_field: float = Field(1.0, description="Inpaint Respective Field", ge=0.0, le=1.0)
107
+ invert_mask_checkbox: bool = Field(False, description="Invert Mask")
108
+ inpaint_erode_or_dilate: int = Field(0, description="Mask Erode or Dilate", ge=-64, le=64)
109
+
110
+
111
+ class Text2ImgRequest(BaseModel):
112
+ prompt: str = ''
113
+ negative_prompt: str = default_prompt_negative
114
+ style_selections: List[str] = default_styles
115
+ performance_selection: PerfomanceSelection = PerfomanceSelection.speed
116
+ aspect_ratios_selection: str = default_aspect_ratio
117
+ image_number: int = Field(default=1, description="Image number", ge=1, le=32)
118
+ image_seed: int = Field(default=-1, description="Seed to generate image, -1 for random")
119
+ sharpness: float = Field(default=2.0, ge=0.0, le=30.0)
120
+ guidance_scale: float = Field(default=default_cfg_scale, ge=1.0, le=30.0)
121
+ base_model_name: str = default_base_model_name
122
+ refiner_model_name: str = default_refiner_model_name
123
+ refiner_switch: float = Field(default=default_refiner_switch, description="Refiner Switch At", ge=0.1, le=1.0)
124
+ loras: List[Lora] = Field(default=default_loras_model)
125
+ advanced_params: AdvancedParams | None = AdvancedParams()
126
+ require_base64: bool = Field(default=False, description="Return base64 data of generated image")
127
+ async_process: bool = Field(default=False, description="Set to true to run async and return job info for retrieving the generation result later")
128
+ webhook_url: str | None = Field(default=None, description="Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure."
129
+ " This allows for asynchronous notification of task status.")
130
+
131
+ def style_selection_parser(style_selections: List[str]) -> List[str]:
132
+ style_selection_arr: List[str] = []
133
+ if style_selections is None or len(style_selections) == 0:
134
+ return []
135
+ for part in style_selections:
136
+ if len(part) > 0:
137
+ for s in part.split(','):
138
+ style = s.strip()
139
+ style_selection_arr.append(style)
140
+ return style_selection_arr
141
+
142
+ def lora_parser(loras: str) -> List[Lora]:
143
+ loras_model: List[Lora] = []
144
+ if loras is None or len(loras) == 0:
145
+ return []
146
+ try:
147
+ loras_model = LoraList.validate_json(loras)
148
+ return loras_model
149
+ except ValidationError as ve:
150
+ errs = ve.errors()
151
+ raise RequestValidationError(errors=errs)
152
+
153
+ def advanced_params_parser(advanced_params: str | None) -> AdvancedParams:
154
+ advanced_params_obj = None
155
+ if advanced_params is not None and len(advanced_params) > 0:
156
+ try:
157
+ advanced_params_obj = AdvancedParams.__pydantic_validator__.validate_json(advanced_params)
158
+ return advanced_params_obj
159
+ except ValidationError as ve:
160
+ errs = ve.errors()
161
+ raise RequestValidationError(errors=errs)
162
+ return advanced_params_obj
163
+
164
+ def outpaint_selections_parser(outpaint_selections: List[str]) -> List[OutpaintExpansion]:
165
+ outpaint_selections_arr: List[OutpaintExpansion] = []
166
+ if outpaint_selections is None or len(outpaint_selections) == 0:
167
+ return []
168
+ for part in outpaint_selections:
169
+ if len(part) > 0:
170
+ for s in part.split(','):
171
+ try:
172
+ expansion = OutpaintExpansion(s)
173
+ outpaint_selections_arr.append(expansion)
174
+ except ValueError:
175
+ err = InitErrorDetails(type='enum', loc=['outpaint_selections'],
176
+ input=outpaint_selections,
177
+ ctx={
178
+ 'expected': "Literal 'Left', 'Right', 'Top', 'Bottom' separated by comma"
179
+ })
180
+ raise RequestValidationError(errors=[err])
181
+ return outpaint_selections_arr
182
+
183
+ def image_prompt_parser(image_prompts_config: List[Tuple]) -> List[ImagePrompt]:
184
+ image_prompts: List[ImagePrompt] = []
185
+ if image_prompts_config is None or len(image_prompts_config) == 0:
186
+ return []
187
+ for config in image_prompts_config:
188
+ cn_img, cn_stop, cn_weight, cn_type = config
189
+ image_prompts.append(ImagePrompt(cn_img=cn_img, cn_stop=cn_stop,
190
+ cn_weight=cn_weight, cn_type=cn_type))
191
+ return image_prompts
192
+
193
+
194
+ class ImgUpscaleOrVaryRequest(Text2ImgRequest):
195
+ input_image: UploadFile
196
+ uov_method: UpscaleOrVaryMethod
197
+ upscale_value: float | None
198
+
199
+ @classmethod
200
+ def as_form(cls, input_image: UploadFile = Form(description="Init image for upsacale or outpaint"),
201
+ uov_method: UpscaleOrVaryMethod = Form(),
202
+ upscale_value: float | None = Form(None, description="Upscale custom value, None for default value", ge=1.0, le=5.0),
203
+ prompt: str = Form(''),
204
+ negative_prompt: str = Form(default_prompt_negative),
205
+ style_selections: List[str] = Form(default_styles, description="Fooocus style selections, separated by comma"),
206
+ performance_selection: PerfomanceSelection = Form(PerfomanceSelection.speed, description="Performance Selection, one of 'Speed','Quality','Extreme Speed'"),
207
+ aspect_ratios_selection: str = Form(default_aspect_ratio, description="Aspect Ratios Selection, default 1152*896"),
208
+ image_number: int = Form(default=1, description="Image number", ge=1, le=32),
209
+ image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
210
+ sharpness: float = Form(default=2.0, ge=0.0, le=30.0),
211
+ guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
212
+ base_model_name: str = Form(default_base_model_name, description="checkpoint file name"),
213
+ refiner_model_name: str = Form(default_refiner_model_name, description="refiner file name"),
214
+ refiner_switch: float = Form(default=default_refiner_switch, description="Refiner Switch At", ge=0.1, le=1.0),
215
+ loras: str | None = Form(default=default_loras_json, description='Lora config in JSON. Format as [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}]'),
216
+ advanced_params: str | None = Form(default=None, description="Advanced parameters in JSON"),
217
+ require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
218
+ async_process: bool = Form(default=False, description="Set to true to run async and return job info for retrieving the generation result later"),
219
+ ):
220
+ style_selection_arr = style_selection_parser(style_selections)
221
+ loras_model = lora_parser(loras)
222
+ advanced_params_obj = advanced_params_parser(advanced_params)
223
+
224
+ return cls(input_image=input_image, uov_method=uov_method, upscale_value=upscale_value,
225
+ prompt=prompt, negative_prompt=negative_prompt, style_selections=style_selection_arr,
226
+ performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
227
+ image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
228
+ base_model_name=base_model_name, refiner_model_name=refiner_model_name, refiner_switch=refiner_switch,
229
+ loras=loras_model, advanced_params=advanced_params_obj, require_base64=require_base64, async_process=async_process)
230
+
231
+
232
+ class ImgInpaintOrOutpaintRequest(Text2ImgRequest):
233
+ input_image: UploadFile | None
234
+ input_mask: UploadFile | None
235
+ inpaint_additional_prompt: str | None
236
+ outpaint_selections: List[OutpaintExpansion]
237
+ outpaint_distance_left: int
238
+ outpaint_distance_right: int
239
+ outpaint_distance_top: int
240
+ outpaint_distance_bottom: int
241
+
242
+ @classmethod
243
+ def as_form(cls, input_image: UploadFile = Form(description="Init image for inpaint or outpaint"),
244
+ input_mask: UploadFile = Form(File(None), description="Inpaint or outpaint mask"),
245
+ inpaint_additional_prompt: str | None = Form(None, description="Describe what you want to inpaint"),
246
+ outpaint_selections: List[str] = Form([], description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' separated by comma"),
247
+ outpaint_distance_left: int = Form(default=0, description="Set outpaint left distance, 0 for default"),
248
+ outpaint_distance_right: int = Form(default=0, description="Set outpaint right distance, 0 for default"),
249
+ outpaint_distance_top: int = Form(default=0, description="Set outpaint top distance, 0 for default"),
250
+ outpaint_distance_bottom: int = Form(default=0, description="Set outpaint bottom distance, 0 for default"),
251
+ prompt: str = Form(''),
252
+ negative_prompt: str = Form(default_prompt_negative),
253
+ style_selections: List[str] = Form(default_styles, description="Fooocus style selections, separated by comma"),
254
+ performance_selection: PerfomanceSelection = Form(PerfomanceSelection.speed, description="Performance Selection, one of 'Speed','Quality','Extreme Speed'"),
255
+ aspect_ratios_selection: str = Form(default_aspect_ratio, description="Aspect Ratios Selection, default 1152*896"),
256
+ image_number: int = Form(default=1, description="Image number", ge=1, le=32),
257
+ image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
258
+ sharpness: float = Form(default=2.0, ge=0.0, le=30.0),
259
+ guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
260
+ base_model_name: str = Form(default_base_model_name),
261
+ refiner_model_name: str = Form(default_refiner_model_name),
262
+ refiner_switch: float = Form(default=default_refiner_switch, description="Refiner Switch At", ge=0.1, le=1.0),
263
+ loras: str | None = Form(default=default_loras_json, description='Lora config in JSON. Format as [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}]'),
264
+ advanced_params: str | None = Form(default=None, description="Advanced parameters in JSON"),
265
+ require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
266
+ async_process: bool = Form(default=False, description="Set to true to run async and return job info for retrieving the generation result later"),
267
+ ):
268
+
269
+ if isinstance(input_mask, File):
270
+ input_mask = None
271
+
272
+ outpaint_selections_arr = outpaint_selections_parser(outpaint_selections)
273
+ style_selection_arr = style_selection_parser(style_selections)
274
+ loras_model = lora_parser(loras)
275
+ advanced_params_obj = advanced_params_parser(advanced_params)
276
+
277
+ return cls(input_image=input_image, input_mask=input_mask, inpaint_additional_prompt=inpaint_additional_prompt,
278
+ outpaint_selections=outpaint_selections_arr, outpaint_distance_left=outpaint_distance_left,
279
+ outpaint_distance_right=outpaint_distance_right, outpaint_distance_top=outpaint_distance_top,
280
+ outpaint_distance_bottom=outpaint_distance_bottom, prompt=prompt, negative_prompt=negative_prompt, style_selections=style_selection_arr,
281
+ performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
282
+ image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
283
+ base_model_name=base_model_name, refiner_model_name=refiner_model_name, refiner_switch=refiner_switch,
284
+ loras=loras_model, advanced_params=advanced_params_obj, require_base64=require_base64, async_process=async_process)
285
+
286
+
287
+ class ImgPromptRequest(ImgInpaintOrOutpaintRequest):
288
+ image_prompts: List[ImagePrompt]
289
+
290
+ @classmethod
291
+ def as_form(cls, input_image: UploadFile = Form(File(None), description="Init image for inpaint or outpaint"),
292
+ input_mask: UploadFile = Form(File(None), description="Inpaint or outpaint mask"),
293
+ inpaint_additional_prompt: str | None = Form(None, description="Describe what you want to inpaint"),
294
+ outpaint_selections: List[str] = Form([], description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' separated by comma"),
295
+ outpaint_distance_left: int = Form(default=0, description="Set outpaint left distance, 0 for default"),
296
+ outpaint_distance_right: int = Form(default=0, description="Set outpaint right distance, 0 for default"),
297
+ outpaint_distance_top: int = Form(default=0, description="Set outpaint top distance, 0 for default"),
298
+ outpaint_distance_bottom: int = Form(default=0, description="Set outpaint bottom distance, 0 for default"),
299
+ cn_img1: UploadFile = Form(File(None), description="Input image for image prompt"),
300
+ cn_stop1: float | None = Form(
301
+ default=None, ge=0, le=1, description="Stop at for image prompt, None for default value"),
302
+ cn_weight1: float | None = Form(
303
+ default=None, ge=0, le=2, description="Weight for image prompt, None for default value"),
304
+ cn_type1: ControlNetType = Form(
305
+ default=ControlNetType.cn_ip, description="ControlNet type for image prompt"),
306
+ cn_img2: UploadFile = Form(
307
+ File(None), description="Input image for image prompt"),
308
+ cn_stop2: float | None = Form(
309
+ default=None, ge=0, le=1, description="Stop at for image prompt, None for default value"),
310
+ cn_weight2: float | None = Form(
311
+ default=None, ge=0, le=2, description="Weight for image prompt, None for default value"),
312
+ cn_type2: ControlNetType = Form(
313
+ default=ControlNetType.cn_ip, description="ControlNet type for image prompt"),
314
+ cn_img3: UploadFile = Form(
315
+ File(None), description="Input image for image prompt"),
316
+ cn_stop3: float | None = Form(
317
+ default=None, ge=0, le=1, description="Stop at for image prompt, None for default value"),
318
+ cn_weight3: float | None = Form(
319
+ default=None, ge=0, le=2, description="Weight for image prompt, None for default value"),
320
+ cn_type3: ControlNetType = Form(
321
+ default=ControlNetType.cn_ip, description="ControlNet type for image prompt"),
322
+ cn_img4: UploadFile = Form(
323
+ File(None), description="Input image for image prompt"),
324
+ cn_stop4: float | None = Form(
325
+ default=None, ge=0, le=1, description="Stop at for image prompt, None for default value"),
326
+ cn_weight4: float | None = Form(
327
+ default=None, ge=0, le=2, description="Weight for image prompt, None for default value"),
328
+ cn_type4: ControlNetType = Form(
329
+ default=ControlNetType.cn_ip, description="ControlNet type for image prompt"),
330
+ prompt: str = Form(''),
331
+ negative_prompt: str = Form(default_prompt_negative),
332
+ style_selections: List[str] = Form(default_styles, description="Fooocus style selections, separated by comma"),
333
+ performance_selection: PerfomanceSelection = Form(
334
+ PerfomanceSelection.speed),
335
+ aspect_ratios_selection: str = Form(default_aspect_ratio),
336
+ image_number: int = Form(
337
+ default=1, description="Image number", ge=1, le=32),
338
+ image_seed: int = Form(default=-1, description="Seed to generate image, -1 for random"),
339
+ sharpness: float = Form(default=2.0, ge=0.0, le=30.0),
340
+ guidance_scale: float = Form(default=default_cfg_scale, ge=1.0, le=30.0),
341
+ base_model_name: str = Form(default_base_model_name),
342
+ refiner_model_name: str = Form(default_refiner_model_name),
343
+ refiner_switch: float = Form(default=default_refiner_switch, description="Refiner Switch At", ge=0.1, le=1.0),
344
+ loras: str | None = Form(default=default_loras_json, description='Lora config in JSON. Format as [{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}]'),
345
+ advanced_params: str | None = Form(default=None, description="Advanced parameters in JSON"),
346
+ require_base64: bool = Form(default=False, description="Return base64 data of generated image"),
347
+ async_process: bool = Form(default=False, description="Set to true to run async and return job info for retrieving the generation result later"),
348
+ ):
349
+ if isinstance(input_image, File):
350
+ input_image = None
351
+ if isinstance(input_mask, File):
352
+ input_mask = None
353
+ if isinstance(cn_img1, File):
354
+ cn_img1 = None
355
+ if isinstance(cn_img2, File):
356
+ cn_img2 = None
357
+ if isinstance(cn_img3, File):
358
+ cn_img3 = None
359
+ if isinstance(cn_img4, File):
360
+ cn_img4 = None
361
+
362
+ outpaint_selections_arr = outpaint_selections_parser(outpaint_selections)
363
+
364
+ image_prompt_config = [(cn_img1, cn_stop1, cn_weight1, cn_type1),
365
+ (cn_img2, cn_stop2, cn_weight2, cn_type2),
366
+ (cn_img3, cn_stop3, cn_weight3, cn_type3),
367
+ (cn_img4, cn_stop4, cn_weight4, cn_type4)]
368
+ image_prompts = image_prompt_parser(image_prompt_config)
369
+ style_selection_arr = style_selection_parser(style_selections)
370
+ loras_model = lora_parser(loras)
371
+ advanced_params_obj = advanced_params_parser(advanced_params)
372
+
373
+ return cls(input_image=input_image, input_mask=input_mask, inpaint_additional_prompt=inpaint_additional_prompt, outpaint_selections=outpaint_selections_arr,
374
+ outpaint_distance_left=outpaint_distance_left, outpaint_distance_right=outpaint_distance_right, outpaint_distance_top=outpaint_distance_top, outpaint_distance_bottom=outpaint_distance_bottom,
375
+ image_prompts=image_prompts, prompt=prompt, negative_prompt=negative_prompt, style_selections=style_selection_arr,
376
+ performance_selection=performance_selection, aspect_ratios_selection=aspect_ratios_selection,
377
+ image_number=image_number, image_seed=image_seed, sharpness=sharpness, guidance_scale=guidance_scale,
378
+ base_model_name=base_model_name, refiner_model_name=refiner_model_name, refiner_switch=refiner_switch,
379
+ loras=loras_model, advanced_params=advanced_params_obj, require_base64=require_base64, async_process=async_process)
380
+
381
+
382
+ class GeneratedImageResult(BaseModel):
383
+ base64: str | None = Field(
384
+ description="Image encoded in base64, or null if finishReasen is not 'SUCCESS', only return when request require base64")
385
+ url: str | None = Field(description="Image file static serve url, or null if finishReasen is not 'SUCCESS'")
386
+ seed: str = Field(description="The seed associated with this image")
387
+ finish_reason: GenerationFinishReason
388
+
389
+
390
+ class DescribeImageType(str, Enum):
391
+ photo = 'Photo'
392
+ anime = 'Anime'
393
+
394
+
395
+ class DescribeImageResponse(BaseModel):
396
+ describe: str
397
+
398
+
399
+ class AsyncJobStage(str, Enum):
400
+ waiting = 'WAITING'
401
+ running = 'RUNNING'
402
+ success = 'SUCCESS'
403
+ error = 'ERROR'
404
+
405
+
406
+ class QueryJobRequest(BaseModel):
407
+ job_id: str = Field(description="Job ID to query")
408
+ require_step_preivew: bool = Field(False, description="Set to true to return a preview image of the generation steps at the current time")
409
+
410
+
411
+ class AsyncJobResponse(BaseModel):
412
+ job_id: str = Field(description="Job ID")
413
+ job_type: TaskType = Field(description="Job type")
414
+ job_stage: AsyncJobStage = Field(description="Job running stage")
415
+ job_progress: int = Field(description="Job running progress, 100 is for finished.")
416
+ job_status: str | None = Field(None, description="Job running status in text")
417
+ job_step_preview: str | None = Field(None, description="Preview image of generation steps at current time, as base64 image")
418
+ job_result: List[GeneratedImageResult] | None = Field(None, description="Job generation result")
419
+
420
+
421
+ class JobQueueInfo(BaseModel):
422
+ running_size: int = Field(description="The current running and waiting job count")
423
+ finished_size: int = Field(description="Finished job cound (after auto clean)")
424
+ last_job_id: str = Field(description="Last submit generation job id")
425
+
426
+
427
+ # TODO: may need more detailed fields; add them when someone needs them
428
+ class JobHistoryInfo(BaseModel):
429
+ job_id: str
430
+ is_finished: bool = False
431
+
432
+
433
+ # Response model for the historical tasks
434
+ class JobHistoryResponse(BaseModel):
435
+ queue: List[JobHistoryInfo] = []
436
+ history: List[JobHistoryInfo] = []
437
+
438
+
439
+ class AllModelNamesResponse(BaseModel):
440
+ model_filenames: List[str] = Field(description="All available model filenames")
441
+ lora_filenames: List[str] = Field(description="All available lora filenames")
442
+
443
+ model_config = ConfigDict(
444
+ protected_namespaces=('protect_me_', 'also_protect_')
445
+ )
446
+
447
+
448
+ class StopResponse(BaseModel):
449
+ msg: str
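
A minimal sketch of the form-field parsers above, fed the same string values the multipart endpoints receive (the lora and sampler names are the module defaults):

loras = lora_parser('[{"model_name": "sd_xl_offset_example-lora_1.0.safetensors", "weight": 0.5}]')
print(loras[0].model_name, loras[0].weight)

adp = advanced_params_parser('{"sampler_name": "dpmpp_2m_sde_gpu", "inpaint_engine": "v2.6"}')
print(adp.sampler_name, adp.inpaint_engine)

styles = style_selection_parser(['Fooocus V2,Fooocus Sharp'])
print(styles)  # -> ['Fooocus V2', 'Fooocus Sharp']
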
Fooocus-API/fooocusapi/models_v2.py ADDED
@@ -0,0 +1,31 @@
1
+ from fooocusapi.models import *
2
+
3
+ class ImagePromptJson(BaseModel):
4
+ cn_img: str | None = Field(None, description="Input image for image prompt as base64")
5
+ cn_stop: float | None = Field(0, ge=0, le=1, description="Stop at for image prompt, 0 for default value")
6
+ cn_weight: float | None = Field(0, ge=0, le=2, description="Weight for image prompt, 0 for default value")
7
+ cn_type: ControlNetType = Field(default=ControlNetType.cn_ip, description="ControlNet type for image prompt")
8
+
9
+ class ImgInpaintOrOutpaintRequestJson(Text2ImgRequest):
10
+ input_image: str = Field(description="Init image for inpaint or outpaint as base64")
11
+ input_mask: str | None = Field('', description="Inpaint or outpaint mask as base64")
12
+ inpaint_additional_prompt: str | None = Field('', description="Describe what you want to inpaint")
13
+ outpaint_selections: List[OutpaintExpansion] = []
14
+ outpaint_distance_left: int | None = Field(-1, description="Set outpaint left distance")
15
+ outpaint_distance_right: int | None = Field(-1, description="Set outpaint right distance")
16
+ outpaint_distance_top: int | None = Field(-1, description="Set outpaint top distance")
17
+ outpaint_distance_bottom: int | None = Field(-1, description="Set outpaint bottom distance")
18
+ image_prompts: List[ImagePromptJson | ImagePrompt] = []
19
+
20
+ class ImgPromptRequestJson(ImgInpaintOrOutpaintRequestJson):
21
+ input_image: str | None = Field(None, description="Init image for inpaint or outpaint as base64")
22
+ image_prompts: List[ImagePromptJson | ImagePrompt]
23
+
24
+ class Text2ImgRequestWithPrompt(Text2ImgRequest):
25
+ image_prompts: List[ImagePromptJson] = []
26
+
27
+ class ImgUpscaleOrVaryRequestJson(Text2ImgRequest):
28
+ uov_method: UpscaleOrVaryMethod = "Upscale (2x)"
29
+ upscale_value: float | None = Field(1.0, ge=1.0, le=5.0, description="Upscale custom value, 1.0 for default value")
30
+ input_image: str = Field(description="Init image for upsacale or outpaint as base64")
31
+ image_prompts: List[ImagePromptJson | ImagePrompt] = []
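
A minimal sketch of building a v2 JSON payload from these models; the route path is an assumption based on the v1/v2 naming, and the input_image placeholder stands in for a real base64 string:

payload = ImgUpscaleOrVaryRequestJson(
    prompt='a cat',
    uov_method='Upscale (2x)',
    upscale_value=2.0,
    input_image='<base64-encoded source image>',  # hypothetical placeholder
).model_dump()
# POST payload as JSON to e.g. /v2/generation/image-upscale-vary (path assumed)
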
Fooocus-API/fooocusapi/parameters.py ADDED
@@ -0,0 +1,186 @@
1
+ from enum import Enum
2
+ from typing import Any, Dict, List, Tuple
3
+ import numpy as np
4
+
5
+
6
+ default_inpaint_engine_version = 'v2.6'
7
+
8
+
9
+ default_styles = ['Fooocus V2', 'Fooocus Enhance', 'Fooocus Sharp']
10
+ default_base_model_name = 'juggernautXL_version6Rundiffusion.safetensors'
11
+ default_refiner_model_name = 'None'
12
+ default_refiner_switch = 0.5
13
+ default_loras = [['sd_xl_offset_example-lora_1.0.safetensors', 0.1]]
14
+ default_lora_name = 'sd_xl_offset_example-lora_1.0.safetensors'
15
+ default_lora_weight = 0.1
16
+ default_cfg_scale = 4.0
17
+ default_prompt_negative = ''
18
+ default_aspect_ratio = '1152*896'
19
+ default_sampler = 'dpmpp_2m_sde_gpu'
20
+ default_scheduler = 'karras'
21
+
22
+
23
+ available_aspect_ratios = [
24
+ '704*1408',
25
+ '704*1344',
26
+ '768*1344',
27
+ '768*1280',
28
+ '832*1216',
29
+ '832*1152',
30
+ '896*1152',
31
+ '896*1088',
32
+ '960*1088',
33
+ '960*1024',
34
+ '1024*1024',
35
+ '1024*960',
36
+ '1088*960',
37
+ '1088*896',
38
+ '1152*896',
39
+ '1152*832',
40
+ '1216*832',
41
+ '1280*768',
42
+ '1344*768',
43
+ '1344*704',
44
+ '1408*704',
45
+ '1472*704',
46
+ '1536*640',
47
+ '1600*640',
48
+ '1664*576',
49
+ '1728*576',
50
+ ]
51
+
52
+ uov_methods = [
53
+ 'Disabled', 'Vary (Subtle)', 'Vary (Strong)', 'Upscale (1.5x)', 'Upscale (2x)', 'Upscale (Fast 2x)', 'Upscale (Custom)'
54
+ ]
55
+
56
+
57
+ outpaint_expansions = [
58
+ 'Left', 'Right', 'Top', 'Bottom'
59
+ ]
60
+
61
+
62
+ def get_aspect_ratio_value(label: str) -> str:
63
+ return label.split(' ')[0].replace('×', '*')
64
+
65
+
66
+ class GenerationFinishReason(str, Enum):
67
+ success = 'SUCCESS'
68
+ queue_is_full = 'QUEUE_IS_FULL'
69
+ user_cancel = 'USER_CANCEL'
70
+ error = 'ERROR'
71
+
72
+
73
+ class ImageGenerationResult(object):
74
+ def __init__(self, im: str | None, seed: str, finish_reason: GenerationFinishReason):
75
+ self.im = im
76
+ self.seed = seed
77
+ self.finish_reason = finish_reason
78
+
79
+
80
+ class ImageGenerationParams(object):
81
+ def __init__(self, prompt: str,
82
+ negative_prompt: str,
83
+ style_selections: List[str],
84
+ performance_selection: str,
85
+ aspect_ratios_selection: str,
86
+ image_number: int,
87
+ image_seed: int | None,
88
+ sharpness: float,
89
+ guidance_scale: float,
90
+ base_model_name: str,
91
+ refiner_model_name: str,
92
+ refiner_switch: float,
93
+ loras: List[Tuple[str, float]],
94
+ uov_input_image: np.ndarray | None,
95
+ uov_method: str,
96
+ upscale_value: float | None,
97
+ outpaint_selections: List[str],
98
+ outpaint_distance_left: int,
99
+ outpaint_distance_right: int,
100
+ outpaint_distance_top: int,
101
+ outpaint_distance_bottom: int,
102
+ inpaint_input_image: Dict[str, np.ndarray] | None,
103
+ inpaint_additional_prompt: str | None,
104
+ image_prompts: List[Tuple[np.ndarray, float, float, str]],
105
+ advanced_params: List[any] | None):
106
+ self.prompt = prompt
107
+ self.negative_prompt = negative_prompt
108
+ self.style_selections = style_selections
109
+ self.performance_selection = performance_selection
110
+ self.aspect_ratios_selection = aspect_ratios_selection
111
+ self.image_number = image_number
112
+ self.image_seed = image_seed
113
+ self.sharpness = sharpness
114
+ self.guidance_scale = guidance_scale
115
+ self.base_model_name = base_model_name
116
+ self.refiner_model_name = refiner_model_name
117
+ self.refiner_switch = refiner_switch
118
+ self.loras = loras
119
+ self.uov_input_image = uov_input_image
120
+ self.uov_method = uov_method
121
+ self.upscale_value = upscale_value
122
+ self.outpaint_selections = outpaint_selections
123
+ self.outpaint_distance_left = outpaint_distance_left
124
+ self.outpaint_distance_right = outpaint_distance_right
125
+ self.outpaint_distance_top = outpaint_distance_top
126
+ self.outpaint_distance_bottom = outpaint_distance_bottom
127
+ self.inpaint_input_image = inpaint_input_image
128
+ self.inpaint_additional_prompt = inpaint_additional_prompt
129
+ self.image_prompts = image_prompts
130
+
131
+ if advanced_params is None:
132
+ disable_preview = False
133
+ adm_scaler_positive = 1.5
134
+ adm_scaler_negative = 0.8
135
+ adm_scaler_end = 0.3
136
+ adaptive_cfg = 7.0
137
+ sampler_name = default_sampler
138
+ scheduler_name = default_scheduler
139
+ generate_image_grid = False
140
+ overwrite_step = -1
141
+ overwrite_switch = -1
142
+ overwrite_width = -1
143
+ overwrite_height = -1
144
+ overwrite_vary_strength = -1
145
+ overwrite_upscale_strength = -1
146
+ mixing_image_prompt_and_vary_upscale = False
147
+ mixing_image_prompt_and_inpaint = False
148
+ debugging_cn_preprocessor = False
149
+ skipping_cn_preprocessor = False
150
+ controlnet_softness = 0.25
151
+ canny_low_threshold = 64
152
+ canny_high_threshold = 128
153
+ refiner_swap_method = 'joint'
154
+ freeu_enabled = False
155
+ freeu_b1, freeu_b2, freeu_s1, freeu_s2 = [None] * 4
156
+ debugging_inpaint_preprocessor = False
157
+ inpaint_disable_initial_latent = False
158
+ inpaint_engine = default_inpaint_engine_version
159
+ inpaint_strength = 1.0
160
+ inpaint_respective_field = 0.618
161
+ inpaint_mask_upload_checkbox = False
162
+ invert_mask_checkbox = False
163
+ inpaint_erode_or_dilate = 0
164
+
165
+
166
+ # Auto-enable the mixing flags when image prompts are combined with inpaint or vary/upscale
167
+ if len(self.image_prompts) > 0 and inpaint_input_image is not None:
168
+ print('Mixing Image Prompts and Inpaint Enabled')
169
+ mixing_image_prompt_and_inpaint = True
170
+ if len(self.image_prompts) > 0 and uov_input_image is not None:
171
+ print('Mixing Image Prompts and Vary Upscale Enabled')
172
+ mixing_image_prompt_and_vary_upscale = True
173
+
174
+ self.advanced_params = [
175
+ disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
176
+ scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
177
+ overwrite_vary_strength, overwrite_upscale_strength, \
178
+ mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
179
+ debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
180
+ refiner_swap_method, \
181
+ freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
182
+ debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \
183
+ inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate
184
+ ]
185
+ else:
186
+ self.advanced_params = advanced_params
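
A minimal sketch of the defaults path above: passing advanced_params=None fills the 35-element positional list with the documented defaults, e.g. sampler_name at index 5:

params = ImageGenerationParams(
    prompt='a cat', negative_prompt='', style_selections=default_styles,
    performance_selection='Speed', aspect_ratios_selection=default_aspect_ratio,
    image_number=1, image_seed=None, sharpness=2.0, guidance_scale=default_cfg_scale,
    base_model_name=default_base_model_name, refiner_model_name='None',
    refiner_switch=default_refiner_switch, loras=default_loras, uov_input_image=None,
    uov_method='Disabled', upscale_value=None, outpaint_selections=[],
    outpaint_distance_left=0, outpaint_distance_right=0,
    outpaint_distance_top=0, outpaint_distance_bottom=0,
    inpaint_input_image=None, inpaint_additional_prompt=None,
    image_prompts=[], advanced_params=None)
assert params.advanced_params[5] == default_sampler  # index 5: sampler_name
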
Fooocus-API/fooocusapi/repositories_versions.py ADDED
@@ -0,0 +1,5 @@
1
+ import os
2
+
3
+ fooocus_version = '2.1.860'
4
+ fooocus_commit_hash = os.environ.get(
5
+ 'FOOOCUS_COMMIT_HASH', "624f74a1ed78ea09467c856cef35aeee0af863f6")
Fooocus-API/fooocusapi/sql_client.py ADDED
@@ -0,0 +1,205 @@
1
+ import os
2
+ import time
3
+ import platform
4
+ from datetime import datetime
5
+ from typing import Optional
6
+
7
+ from sqlalchemy import Integer, Float, VARCHAR, Boolean, JSON, Text, create_engine
8
+ from sqlalchemy.orm import declarative_base, Session, Mapped, mapped_column
9
+
10
+
11
+ Base = declarative_base()
12
+
13
+ adv_params_keys = [
14
+ "disable_preview",
15
+ "adm_scaler_positive",
16
+ "adm_scaler_negative",
17
+ "adm_scaler_end",
18
+ "adaptive_cfg",
19
+ "sampler_name",
20
+ "scheduler_name",
21
+ "generate_image_grid",
22
+ "overwrite_step",
23
+ "overwrite_switch",
24
+ "overwrite_width",
25
+ "overwrite_height",
26
+ "overwrite_vary_strength",
27
+ "overwrite_upscale_strength",
28
+ "mixing_image_prompt_and_vary_upscale",
29
+ "mixing_image_prompt_and_inpaint",
30
+ "debugging_cn_preprocessor",
31
+ "skipping_cn_preprocessor",
32
+ "controlnet_softness",
33
+ "canny_low_threshold",
34
+ "canny_high_threshold",
35
+ "refiner_swap_method",
36
+ "freeu_enabled",
37
+ "freeu_b1",
38
+ "freeu_b2",
39
+ "freeu_s1",
40
+ "freeu_s2",
41
+ "debugging_inpaint_preprocessor",
42
+ "inpaint_disable_initial_latent",
43
+ "inpaint_engine",
44
+ "inpaint_strength",
45
+ "inpaint_respective_field",
46
+ "inpaint_mask_upload_checkbox",
47
+ "invert_mask_checkbox",
48
+ "inpaint_erode_or_dilate"
49
+ ]
50
+
51
+ if platform.system().lower() == 'windows':
52
+ default_sqlite_db_path = os.path.join(os.path.dirname(__file__), "../database.db").replace("\\", "/")
53
+ else:
54
+ default_sqlite_db_path = os.path.join(os.path.dirname(__file__), "../database.db")
55
+
56
+ connection_uri = os.environ.get("FOOOCUS_DB_CONF", f"sqlite:///{default_sqlite_db_path}")
57
+
58
+
59
+ class GenerateRecord(Base):
60
+ __tablename__ = 'generate_record'
61
+
62
+ task_id: Mapped[str] = mapped_column(VARCHAR(255), nullable=False, primary_key=True)
63
+ task_type: Mapped[str] = mapped_column(Text, nullable=False)
64
+ result_url: Mapped[str] = mapped_column(Text, nullable=True)
65
+ finish_reason: Mapped[str] = mapped_column(Text, nullable=True)
66
+ date_time: Mapped[int] = mapped_column(Integer, nullable=False)
67
+
68
+ prompt: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
69
+ negative_prompt: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
70
+ style_selections: Mapped[Optional[list]] = mapped_column(JSON, nullable=True)
71
+ performance_selection: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
72
+ aspect_ratios_selection: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
73
+ base_model_name: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
74
+ refiner_model_name: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
75
+ refiner_switch: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
76
+ loras: Mapped[Optional[list]] = mapped_column(JSON, nullable=True)
77
+ image_number: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
78
+ image_seed: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
79
+ sharpness: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
80
+ guidance_scale: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
81
+ advanced_params: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
82
+
83
+ input_image: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
84
+ input_mask: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
85
+ image_prompts: Mapped[Optional[list]] = mapped_column(JSON, nullable=True)
86
+ inpaint_additional_prompt: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
87
+ outpaint_selections: Mapped[Optional[list]] = mapped_column(JSON, nullable=True)
88
+ outpaint_distance_left: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
89
+ outpaint_distance_right: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
90
+ outpaint_distance_top: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
91
+ outpaint_distance_bottom: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
92
+ uov_method: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
93
+ upscale_value: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
94
+
95
+ webhook_url: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
96
+ require_base64: Mapped[Optional[bool]] = mapped_column(Boolean, nullable=True)
97
+ async_process: Mapped[Optional[bool]] = mapped_column(Boolean, nullable=True)
98
+
99
+ def __repr__(self) -> str:
100
+ return f"GenerateRecord(task_id={self.task_id!r}, task_type={self.task_type!r}, \
101
+ result_url={self.result_url!r}, finish_reason={self.finish_reason!r}, date_time={self.date_time!r}, \
102
+ prompt={self.prompt!r}, negative_prompt={self.negative_prompt!r}, style_selections={self.style_selections!r}, performance_selection={self.performance_selection!r}, \
103
+ aspect_ratios_selection={self.aspect_ratios_selection!r}, base_model_name={self.base_model_name!r}, \
104
+ refiner_model_name={self.refiner_model_name!r}, refiner_switch={self.refiner_switch!r}, loras={self.loras!r}, \
105
+ image_number={self.image_number!r}, image_seed={self.image_seed!r}, sharpness={self.sharpness!r}, \
106
+ guidance_scale={self.guidance_scale!r}, advanced_params={self.advanced_params!r}, input_image={self.input_image!r}, \
107
+ input_mask={self.input_mask!r}, image_prompts={self.image_prompts!r}, inpaint_additional_prompt={self.inpaint_additional_prompt!r}, \
108
+ outpaint_selections={self.outpaint_selections!r}, outpaint_distance_left={self.outpaint_distance_left!r}, outpaint_distance_right={self.outpaint_distance_right!r}, \
109
+ outpaint_distance_top={self.outpaint_distance_top!r}, outpaint_distance_bottom={self.outpaint_distance_bottom!r}, uov_method={self.uov_method!r}, \
110
+ upscale_value={self.upscale_value!r}, webhook_url={self.webhook_url!r}, require_base64={self.require_base64!r}, \
111
+ async_process={self.async_process!r})"
112
+
113
+ engine = create_engine(connection_uri)
114
+
115
+ session = Session(engine)
116
+ Base.metadata.create_all(engine, checkfirst=True)
117
+ session.close()
118
+
119
+
120
+ def convert_to_dict_list(obj_list: list[object]) -> dict:
121
+ dict_list = []
122
+ for obj in obj_list:
123
+ # 将对象属性转化为字典键值对
124
+ dict_obj = {}
125
+ for attr, value in vars(obj).items():
126
+ if not callable(value) and not attr.startswith("__") and not attr.startswith("_"):
127
+ dict_obj[attr] = value
128
+ task_info = {
129
+ "task_id": obj.task_id,
130
+ "task_type": obj.task_type,
131
+ "result_url": obj.result_url,
132
+ "finish_reason": obj.finish_reason,
133
+ "date_time": datetime.fromtimestamp(obj.date_time).strftime("%Y-%m-%d %H:%M:%S"),
134
+ }
135
+ del dict_obj['task_id']
136
+ del dict_obj['task_type']
137
+ del dict_obj['result_url']
138
+ del dict_obj['finish_reason']
139
+ del dict_obj['date_time']
140
+ dict_list.append({"params": dict_obj, "task_info": task_info})
141
+ return dict_list
142
+
143
+
144
+
145
+ class MysqlSQLAlchemy:
146
+ def __init__(self, connection_uri: str):
147
+ # 'mysql+pymysql://{username}:{password}@{host}:{port}/{database}'
148
+ self.engine = create_engine(connection_uri)
149
+ self.session = Session(self.engine)
150
+
151
+ def store_history(self, record: dict) -> None:
152
+ """
153
+ Store history to database
154
+ :param record:
155
+ :return:
156
+ """
157
+ self.session.add_all([GenerateRecord(**record)])
158
+ self.session.commit()
159
+
160
+ def get_history(self, task_id: str=None, page: int=0, page_size: int=20,
161
+ order_by: str='date_time') -> list:
162
+ """
163
+ Get history from database
164
+ :param task_id:
165
+ :return:
166
+ """
167
+ if task_id is not None:
168
+ res = self.session.query(GenerateRecord).filter(GenerateRecord.task_id == task_id).all()
169
+ if len(res) == 0:
170
+ return []
171
+ return convert_to_dict_list(res)
172
+
173
+ res = self.session.query(GenerateRecord).order_by(getattr(GenerateRecord, order_by).desc()).offset(page * page_size).limit(page_size).all()
174
+ if len(res) == 0:
175
+ return []
176
+ return convert_to_dict_list(res)
177
+
178
+
179
+ db = MysqlSQLAlchemy(connection_uri=connection_uri)
180
+ def req_to_dict(req: dict) -> dict:
181
+ req["loras"] = [{"model_name": lora[0], "weight": lora[1]} for lora in req["loras"]]
182
+ req["advanced_params"] = dict(zip(adv_params_keys, req["advanced_params"]))
183
+ req["image_prompts"] = [{
184
+ "cn_img": "",
185
+ "cn_stop": image[1],
186
+ "cn_weight": image[2],
187
+ "cn_type": image[3]
188
+ } for image in req["image_prompts"]]
189
+ del req["inpaint_input_image"]
190
+ del req["uov_input_image"]
191
+ return req
192
+
193
+ def add_history(params: dict, task_type: str, task_id: str, result_url: str, finish_reason: str) -> None:
194
+ params = req_to_dict(params["params"])
195
+ params["date_time"] = int(time.time())
196
+ params["task_type"] = task_type
197
+ params["task_id"] = task_id
198
+ params["result_url"] = result_url
199
+ params["finish_reason"] = finish_reason
200
+
201
+ db.store_history(params)
202
+
203
+
204
+ def query_history(task_id: str=None, page: int=0, page_size: int=20, order_by: str="date_time") -> list:
205
+ return db.get_history(task_id=task_id, page=page, page_size=page_size, order_by=order_by)
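
Reviewer note: a minimal usage sketch for the helpers above, not part of the committed files. It assumes the module is importable as fooocusapi.sql_client; the scratch database path is hypothetical, and FOOOCUS_DB_CONF must be set before the import because connection_uri is read at import time.

import os

# Must be set before importing the module; connection_uri is read at import time.
os.environ["FOOOCUS_DB_CONF"] = "sqlite:///./scratch.db"  # hypothetical scratch DB

from fooocusapi.sql_client import query_history

# Page through stored generations, newest first (order_by defaults to 'date_time')
for entry in query_history(page=0, page_size=10):
    info = entry["task_info"]
    print(info["task_id"], info["task_type"], info["date_time"])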
Fooocus-API/fooocusapi/task_queue.py ADDED
@@ -0,0 +1,171 @@
+ import uuid
+ import time
+ import requests
+ import numpy as np
+
+ from enum import Enum
+ from typing import Any, List
+
+ from fooocusapi.args import args
+ from fooocusapi.file_utils import delete_output_file, get_file_serve_url
+ from fooocusapi.img_utils import narray_to_base64img
+ from fooocusapi.sql_client import add_history
+ from fooocusapi.parameters import ImageGenerationResult, GenerationFinishReason
+
+
+ class TaskType(str, Enum):
+     text_2_img = 'Text to Image'
+     img_uov = 'Image Upscale or Variation'
+     img_inpaint_outpaint = 'Image Inpaint or Outpaint'
+     img_prompt = 'Image Prompt'
+     not_found = 'Not Found'
+
+
+ class QueueTask(object):
+     job_id: str
+     is_finished: bool = False
+     finish_progress: int = 0
+     start_millis: int = 0
+     finish_millis: int = 0
+     finish_with_error: bool = False
+     task_status: str | None = None
+     task_step_preview: str | None = None
+     task_result: List[ImageGenerationResult] | None = None
+     error_message: str | None = None
+     webhook_url: str | None = None  # attribute for an individual task's webhook_url
+
+     def __init__(self, job_id: str, type: TaskType, req_param: dict, in_queue_millis: int,
+                  webhook_url: str | None = None):
+         self.job_id = job_id
+         self.type = type
+         self.req_param = req_param
+         self.in_queue_millis = in_queue_millis
+         self.webhook_url = webhook_url
+
+     def set_progress(self, progress: int, status: str | None):
+         if progress > 100:
+             progress = 100
+         self.finish_progress = progress
+         self.task_status = status
+
+     def set_step_preview(self, task_step_preview: str | None):
+         self.task_step_preview = task_step_preview
+
+     def set_result(self, task_result: List[ImageGenerationResult], finish_with_error: bool, error_message: str | None = None):
+         if not finish_with_error:
+             self.finish_progress = 100
+             self.task_status = 'Finished'
+         self.task_result = task_result
+         self.finish_with_error = finish_with_error
+         self.error_message = error_message
+
+
+ class TaskQueue(object):
+     last_job_id = None
+     webhook_url: str | None = None
+
+     def __init__(self, queue_size: int, history_size: int, webhook_url: str | None = None):
+         self.queue: List[QueueTask] = []
+         self.history: List[QueueTask] = []
+         self.queue_size = queue_size
+         self.history_size = history_size
+         self.webhook_url = webhook_url
+
+     def add_task(self, type: TaskType, req_param: dict, webhook_url: str | None = None) -> QueueTask | None:
+         """
+         Create a task and add it to the queue
+         :returns: The created QueueTask, or None if the queue size limit is reached
+         """
+         if len(self.queue) >= self.queue_size:
+             return None
+
+         job_id = str(uuid.uuid4())
+         task = QueueTask(job_id=job_id, type=type, req_param=req_param,
+                          in_queue_millis=int(round(time.time() * 1000)),
+                          webhook_url=webhook_url)
+         self.queue.append(task)
+         self.last_job_id = job_id
+         return task
+
+     def get_task(self, job_id: str, include_history: bool = False) -> QueueTask | None:
+         for task in self.queue:
+             if task.job_id == job_id:
+                 return task
+
+         if include_history:
+             for task in self.history:
+                 if task.job_id == job_id:
+                     return task
+
+         return None
+
+     def is_task_ready_to_start(self, job_id: str) -> bool:
+         task = self.get_task(job_id)
+         if task is None:
+             return False
+
+         return self.queue[0].job_id == job_id
+
+     def start_task(self, job_id: str):
+         task = self.get_task(job_id)
+         if task is not None:
+             task.start_millis = int(round(time.time() * 1000))
+
+     def finish_task(self, job_id: str):
+         task = self.get_task(job_id)
+         if task is not None:
+             task.is_finished = True
+             task.finish_millis = int(round(time.time() * 1000))
+
+             # Use the task's webhook_url if available, else use the default
+             webhook_url = task.webhook_url or self.webhook_url
+
+             data = {"job_id": task.job_id, "job_result": []}
+             for item in task.task_result or []:  # task_result may still be None on early failure
+                 data["job_result"].append({
+                     "url": get_file_serve_url(item.im) if item.im else None,
+                     "seed": item.seed if item.seed else "-1",
+                 })
+
+             # Send webhook
+             if task.is_finished and webhook_url:
+                 try:
+                     res = requests.post(webhook_url, json=data)
+                     print(f'Call webhook response status: {res.status_code}')
+                 except Exception as e:
+                     print('Call webhook error:', e)
+
+             # Move task to history
+             self.queue.remove(task)
+             self.history.append(task)
+
+             if args.presistent:  # flag name is spelled this way in fooocusapi/args.py
+                 add_history(task.req_param, task.type, task.job_id,
+                             ','.join([job["url"] for job in data["job_result"] if job["url"] is not None]),
+                             task.task_result[0].finish_reason if task.task_result else None)
+
+             # Clean history
+             if len(self.history) > self.history_size and self.history_size != 0:
+                 removed_task = self.history.pop(0)
+                 if isinstance(removed_task.task_result, list):
+                     for item in removed_task.task_result:
+                         if isinstance(item, ImageGenerationResult) and item.finish_reason == GenerationFinishReason.success and item.im is not None:
+                             delete_output_file(item.im)
+                 print(f"Cleaned task history, removed task: {removed_task.job_id}")
+
+
+ class TaskOutputs:
+     def __init__(self, task: QueueTask):
+         self.task = task
+         self.outputs = []  # per-instance list, so outputs are not shared between tasks
+
+     def append(self, args: List[Any]):
+         self.outputs.append(args)
+         if len(args) >= 2:
+             if args[0] == 'preview' and isinstance(args[1], tuple) and len(args[1]) >= 2:
+                 number = args[1][0]
+                 text = args[1][1]
+                 self.task.set_progress(number, text)
+                 if len(args[1]) >= 3 and isinstance(args[1][2], np.ndarray):
+                     base64_preview_img = narray_to_base64img(args[1][2])
+                     self.task.set_step_preview(base64_preview_img)
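
Reviewer note: a lifecycle sketch of the queue API above, not part of the committed files. It assumes fooocusapi.args is importable and that persistence (args.presistent) is disabled so finish_task skips the database write; the request payload is a placeholder and no Fooocus pipeline is invoked.

from fooocusapi.task_queue import TaskQueue, TaskType

queue = TaskQueue(queue_size=3, history_size=6, webhook_url=None)

task = queue.add_task(TaskType.text_2_img, req_param={"prompt": "a cat"})  # placeholder payload
if task is None:
    print("Queue full, reject the request")
else:
    # A worker polls until the task reaches the head of the queue
    assert queue.is_task_ready_to_start(task.job_id)
    queue.start_task(task.job_id)
    task.set_result([], finish_with_error=False)  # real results omitted in this sketch
    queue.finish_task(task.job_id)                # moves the task into history
    print(queue.get_task(task.job_id, include_history=True).is_finished)  # True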
Fooocus-API/fooocusapi/worker.py ADDED
@@ -0,0 +1,867 @@
+ import copy
+ import random
+ import time
+ import numpy as np
+ import torch
+ import re
+ import logging
+
+ from typing import List
+ from fooocusapi.file_utils import save_output_file
+ from fooocusapi.parameters import GenerationFinishReason, ImageGenerationParams, ImageGenerationResult
+ from fooocusapi.task_queue import QueueTask, TaskQueue, TaskOutputs
+
+
+ task_queue = TaskQueue(queue_size=3, history_size=6, webhook_url=None)
+
+
+ def process_top():
+     import ldm_patched.modules.model_management
+     ldm_patched.modules.model_management.interrupt_current_processing()
+
+
+ @torch.no_grad()
+ @torch.inference_mode()
+ def process_generate(async_task: QueueTask, params: ImageGenerationParams) -> List[ImageGenerationResult]:
+     try:
+         import modules.default_pipeline as pipeline
+     except Exception as e:
+         print('Import default pipeline error:', e)
+         if not async_task.is_finished:
+             task_queue.finish_task(async_task.job_id)
+             async_task.set_result([], True, str(e))
+             print(f"[Task Queue] Finish task with error, job_id={async_task.job_id}")
+         return []
+
+     import modules.patch as patch
+     import modules.flags as flags
+     import modules.core as core
+     import modules.inpaint_worker as inpaint_worker
+     import modules.config as config
+     import modules.advanced_parameters as advanced_parameters
+     import modules.constants as constants
+     import extras.preprocessors as preprocessors
+     import extras.ip_adapter as ip_adapter
+     import extras.face_crop as face_crop
+     import ldm_patched.modules.model_management as model_management
+     from modules.util import remove_empty_str, resize_image, HWC3, set_image_shape_ceil, get_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate
+     from modules.private_logger import log
+     from modules.upscaler import perform_upscale
+     from extras.expansion import safe_str
+     from modules.sdxl_styles import apply_style, fooocus_expansion, apply_wildcards
+     import fooocus_version
+
+     outputs = TaskOutputs(async_task)
+     results = []
+
+     def refresh_seed(r, seed_string):
+         if r:
+             return random.randint(constants.MIN_SEED, constants.MAX_SEED)
+         else:
+             try:
+                 seed_value = int(seed_string)
+                 if constants.MIN_SEED <= seed_value <= constants.MAX_SEED:
+                     return seed_value
+             except ValueError:
+                 pass
+             return random.randint(constants.MIN_SEED, constants.MAX_SEED)
+
+     def progressbar(_, number, text):
+         print(f'[Fooocus] {text}')
+         outputs.append(['preview', (number, text, None)])
+
+     def yield_result(_, imgs, tasks):
+         if not isinstance(imgs, list):
+             imgs = [imgs]
+
+         results = []
+         for i, im in enumerate(imgs):
+             seed = -1 if len(tasks) == 0 else tasks[i]['task_seed']
+             img_filename = save_output_file(im)
+             results.append(ImageGenerationResult(im=img_filename, seed=str(seed), finish_reason=GenerationFinishReason.success))
+         async_task.set_result(results, False)
+         task_queue.finish_task(async_task.job_id)
+         print(f"[Task Queue] Finish task, job_id={async_task.job_id}")
+
+         outputs.append(['results', imgs])
+         pipeline.prepare_text_encoder(async_call=True)
+         return results
+
+     try:
+         waiting_sleep_steps: int = 0
+         waiting_start_time = time.perf_counter()
+         while not task_queue.is_task_ready_to_start(async_task.job_id):
+             if waiting_sleep_steps == 0:
+                 print(
+                     f"[Task Queue] Waiting for the task queue to become free, job_id={async_task.job_id}")
+             delay = 0.1
+             time.sleep(delay)
+             waiting_sleep_steps += 1
+             if waiting_sleep_steps % int(10 / delay) == 0:
+                 waiting_time = time.perf_counter() - waiting_start_time
+                 print(
+                     f"[Task Queue] Already waiting for {waiting_time:.1f} seconds, job_id={async_task.job_id}")
+
+         print(f"[Task Queue] Task queue is free, start task, job_id={async_task.job_id}")
+
+         task_queue.start_task(async_task.job_id)
+
+         execution_start_time = time.perf_counter()
+
+         # Transform parameters
+         prompt = params.prompt
+         negative_prompt = params.negative_prompt
+         style_selections = params.style_selections
+         performance_selection = params.performance_selection
+         aspect_ratios_selection = params.aspect_ratios_selection
+         image_number = params.image_number
+         image_seed = None if params.image_seed == -1 else params.image_seed
+         sharpness = params.sharpness
+         guidance_scale = params.guidance_scale
+         base_model_name = params.base_model_name
+         refiner_model_name = params.refiner_model_name
+         refiner_switch = params.refiner_switch
+         loras = params.loras
+         input_image_checkbox = params.uov_input_image is not None or params.inpaint_input_image is not None or len(params.image_prompts) > 0
+         current_tab = 'uov' if params.uov_method != flags.disabled else 'ip' if len(params.image_prompts) > 0 else 'inpaint' if params.inpaint_input_image is not None else None
+         uov_method = params.uov_method
+         upscale_value = params.upscale_value
+         uov_input_image = params.uov_input_image
+         outpaint_selections = params.outpaint_selections
+         outpaint_distance_left = params.outpaint_distance_left
+         outpaint_distance_top = params.outpaint_distance_top
+         outpaint_distance_right = params.outpaint_distance_right
+         outpaint_distance_bottom = params.outpaint_distance_bottom
+         inpaint_input_image = params.inpaint_input_image
+         inpaint_additional_prompt = params.inpaint_additional_prompt
+         inpaint_mask_image_upload = None
+
+         if inpaint_additional_prompt is None:
+             inpaint_additional_prompt = ''
+
+         image_seed = refresh_seed(image_seed is None, image_seed)
+
+         cn_tasks = {x: [] for x in flags.ip_list}
+         for img_prompt in params.image_prompts:
+             cn_img, cn_stop, cn_weight, cn_type = img_prompt
+             cn_tasks[cn_type].append([cn_img, cn_stop, cn_weight])
+
+         advanced_parameters.set_all_advanced_parameters(*params.advanced_params)
+
+         if inpaint_input_image is not None and inpaint_input_image['image'] is not None:
+             inpaint_image_size = inpaint_input_image['image'].shape[:2]
+             if inpaint_input_image['mask'] is None:
+                 inpaint_input_image['mask'] = np.zeros(inpaint_image_size, dtype=np.uint8)
+             else:
+                 advanced_parameters.inpaint_mask_upload_checkbox = True
+
+             inpaint_input_image['mask'] = HWC3(inpaint_input_image['mask'])
+             inpaint_mask_image_upload = inpaint_input_image['mask']
+
+         # Fooocus async_worker.py code start
+
+         outpaint_selections = [o.lower() for o in outpaint_selections]
+         base_model_additional_loras = []
+         raw_style_selections = copy.deepcopy(style_selections)
+         uov_method = uov_method.lower()
+
+         if fooocus_expansion in style_selections:
+             use_expansion = True
+             style_selections.remove(fooocus_expansion)
+         else:
+             use_expansion = False
+
+         use_style = len(style_selections) > 0
+
+         if base_model_name == refiner_model_name:
+             print('Refiner disabled because the base model and the refiner are the same.')
+             refiner_model_name = 'None'
+
+         assert performance_selection in ['Speed', 'Quality', 'Extreme Speed']
+
+         steps = 30
+
+         if performance_selection == 'Speed':
+             steps = 30
+
+         if performance_selection == 'Quality':
+             steps = 60
+
+         if performance_selection == 'Extreme Speed':
+             print('Enter LCM mode.')
+             progressbar(async_task, 1, 'Downloading LCM components ...')
+             loras += [(config.downloading_sdxl_lcm_lora(), 1.0)]
+
+             if refiner_model_name != 'None':
+                 print('Refiner disabled in LCM mode.')
+
+             refiner_model_name = 'None'
+             sampler_name = advanced_parameters.sampler_name = 'lcm'
+             scheduler_name = advanced_parameters.scheduler_name = 'lcm'
+             patch.sharpness = sharpness = 0.0
+             cfg_scale = guidance_scale = 1.0
+             patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0
+             refiner_switch = 1.0
+             patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0
+             patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
+             patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
+             steps = 8
+
+         patch.adaptive_cfg = advanced_parameters.adaptive_cfg
+         print(f'[Parameters] Adaptive CFG = {patch.adaptive_cfg}')
+
+         patch.sharpness = sharpness
+         print(f'[Parameters] Sharpness = {patch.sharpness}')
+
+         patch.positive_adm_scale = advanced_parameters.adm_scaler_positive
+         patch.negative_adm_scale = advanced_parameters.adm_scaler_negative
+         patch.adm_scaler_end = advanced_parameters.adm_scaler_end
+         print(f'[Parameters] ADM Scale = '
+               f'{patch.positive_adm_scale} : '
+               f'{patch.negative_adm_scale} : '
+               f'{patch.adm_scaler_end}')
+
+         cfg_scale = float(guidance_scale)
+         print(f'[Parameters] CFG = {cfg_scale}')
+
+         initial_latent = None
+         denoising_strength = 1.0
+         tiled = False
+
+         # Validate the aspect ratio format: it must be 'width*height'
+         if not aspect_ratios_selection.replace('*', ' ').replace(' ', '').isdigit():
+             raise ValueError("Invalid input format. Please enter aspect ratios in the form 'width*height'.")
+         width, height = aspect_ratios_selection.replace('*', ' ').split(' ')[:2]
+         # Validate width and height are integers
+         if not (width.isdigit() and height.isdigit()):
+             raise ValueError("Invalid width or height. Please enter valid integers.")
+
+         width, height = int(width), int(height)
+
+         skip_prompt_processing = False
+         refiner_swap_method = advanced_parameters.refiner_swap_method
+
+         inpaint_worker.current_task = None
+         inpaint_parameterized = advanced_parameters.inpaint_engine != 'None'
+         inpaint_image = None
+         inpaint_mask = None
+         inpaint_head_model_path = None
+
+         use_synthetic_refiner = False
+
+         controlnet_canny_path = None
+         controlnet_cpds_path = None
+         clip_vision_path, ip_negative_path, ip_adapter_path, ip_adapter_face_path = None, None, None, None
+
+         seed = int(image_seed)
+         print(f'[Parameters] Seed = {seed}')
+
+         sampler_name = advanced_parameters.sampler_name
+         scheduler_name = advanced_parameters.scheduler_name
+
+         goals = []
+         tasks = []
+
+         if input_image_checkbox:
+             if (current_tab == 'uov' or (
+                     current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \
+                     and uov_method != flags.disabled and uov_input_image is not None:
+                 uov_input_image = HWC3(uov_input_image)
+                 if 'vary' in uov_method:
+                     goals.append('vary')
+                 elif 'upscale' in uov_method:
+                     goals.append('upscale')
+                     if 'fast' in uov_method:
+                         skip_prompt_processing = True
+                     else:
+                         steps = 18
+
+                         if performance_selection == 'Speed':
+                             steps = 18
+
+                         if performance_selection == 'Quality':
+                             steps = 36
+
+                         if performance_selection == 'Extreme Speed':
+                             steps = 8
+
+                     progressbar(async_task, 1, 'Downloading upscale models ...')
+                     config.downloading_upscale_model()
+             if (current_tab == 'inpaint' or (
+                     current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \
+                     and isinstance(inpaint_input_image, dict):
+                 inpaint_image = inpaint_input_image['image']
+                 inpaint_mask = inpaint_input_image['mask'][:, :, 0]
+
+                 if advanced_parameters.inpaint_mask_upload_checkbox:
+                     if isinstance(inpaint_mask_image_upload, np.ndarray):
+                         if inpaint_mask_image_upload.ndim == 3:
+                             H, W, C = inpaint_image.shape
+                             inpaint_mask_image_upload = resample_image(inpaint_mask_image_upload, width=W, height=H)
+                             inpaint_mask_image_upload = np.mean(inpaint_mask_image_upload, axis=2)
+                             inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255
+                             inpaint_mask = inpaint_mask_image_upload
+
+                 if int(advanced_parameters.inpaint_erode_or_dilate) != 0:
+                     inpaint_mask = erode_or_dilate(inpaint_mask, advanced_parameters.inpaint_erode_or_dilate)
+
+                 if advanced_parameters.invert_mask_checkbox:
+                     inpaint_mask = 255 - inpaint_mask
+
+                 inpaint_image = HWC3(inpaint_image)
+                 if isinstance(inpaint_image, np.ndarray) and isinstance(inpaint_mask, np.ndarray) \
+                         and (np.any(inpaint_mask > 127) or len(outpaint_selections) > 0):
+                     progressbar(async_task, 1, 'Downloading upscale models ...')
+                     config.downloading_upscale_model()
+                     if inpaint_parameterized:
+                         progressbar(async_task, 1, 'Downloading inpainter ...')
+                         inpaint_head_model_path, inpaint_patch_model_path = config.downloading_inpaint_models(
+                             advanced_parameters.inpaint_engine)
+                         base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
+                         print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
+                         if refiner_model_name == 'None':
+                             use_synthetic_refiner = True
+                             refiner_switch = 0.5
+                     else:
+                         inpaint_head_model_path, inpaint_patch_model_path = None, None
+                         print('[Inpaint] Parameterized inpaint is disabled.')
+                     if inpaint_additional_prompt != '':
+                         if prompt == '':
+                             prompt = inpaint_additional_prompt
+                         else:
+                             prompt = inpaint_additional_prompt + '\n' + prompt
+                     goals.append('inpaint')
+             if current_tab == 'ip' or \
+                     advanced_parameters.mixing_image_prompt_and_inpaint or \
+                     advanced_parameters.mixing_image_prompt_and_vary_upscale:
+                 goals.append('cn')
+                 progressbar(async_task, 1, 'Downloading control models ...')
+                 if len(cn_tasks[flags.cn_canny]) > 0:
+                     controlnet_canny_path = config.downloading_controlnet_canny()
+                 if len(cn_tasks[flags.cn_cpds]) > 0:
+                     controlnet_cpds_path = config.downloading_controlnet_cpds()
+                 if len(cn_tasks[flags.cn_ip]) > 0:
+                     clip_vision_path, ip_negative_path, ip_adapter_path = config.downloading_ip_adapters('ip')
+                 if len(cn_tasks[flags.cn_ip_face]) > 0:
+                     clip_vision_path, ip_negative_path, ip_adapter_face_path = config.downloading_ip_adapters(
+                         'face')
+                 progressbar(async_task, 1, 'Loading control models ...')
+
+                 # Load or unload CNs
+                 pipeline.refresh_controlnets([controlnet_canny_path, controlnet_cpds_path])
+                 ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
+                 ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path)
+
+         switch = int(round(steps * refiner_switch))
+
+         if advanced_parameters.overwrite_step > 0:
+             steps = advanced_parameters.overwrite_step
+
+         if advanced_parameters.overwrite_switch > 0:
+             switch = advanced_parameters.overwrite_switch
+
+         if advanced_parameters.overwrite_width > 0:
+             width = advanced_parameters.overwrite_width
+
+         if advanced_parameters.overwrite_height > 0:
+             height = advanced_parameters.overwrite_height
+
+         print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}')
+         print(f'[Parameters] Steps = {steps} - {switch}')
+
+         progressbar(async_task, 1, 'Initializing ...')
+
+         if not skip_prompt_processing:
+
+             prompts = remove_empty_str([safe_str(p) for p in prompt.splitlines()], default='')
+             negative_prompts = remove_empty_str([safe_str(p) for p in negative_prompt.splitlines()], default='')
+
+             prompt = prompts[0]
+             negative_prompt = negative_prompts[0]
+
+             if prompt == '':
+                 # disable expansion when empty since it is not meaningful and influences image prompt
+                 use_expansion = False
+
+             extra_positive_prompts = prompts[1:] if len(prompts) > 1 else []
+             extra_negative_prompts = negative_prompts[1:] if len(negative_prompts) > 1 else []
+
+             progressbar(async_task, 3, 'Loading models ...')
+             pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
+                                         loras=loras, base_model_additional_loras=base_model_additional_loras,
+                                         use_synthetic_refiner=use_synthetic_refiner)
+
+             progressbar(async_task, 3, 'Processing prompts ...')
+             tasks = []
+             for i in range(image_number):
+                 task_seed = (seed + i) % (constants.MAX_SEED + 1)  # randint is inclusive, % is not
+                 task_rng = random.Random(task_seed)  # may bind to inpaint noise in the future
+
+                 task_prompt = apply_wildcards(prompt, task_rng)
+                 task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
+                 task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
+                 task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
+
+                 positive_basic_workloads = []
+                 negative_basic_workloads = []
+
+                 if use_style:
+                     for s in style_selections:
+                         p, n = apply_style(s, positive=task_prompt)
+                         positive_basic_workloads = positive_basic_workloads + p
+                         negative_basic_workloads = negative_basic_workloads + n
+                 else:
+                     positive_basic_workloads.append(task_prompt)
+
+                 negative_basic_workloads.append(task_negative_prompt)  # Always use independent workload for negative.
+
+                 positive_basic_workloads = positive_basic_workloads + task_extra_positive_prompts
+                 negative_basic_workloads = negative_basic_workloads + task_extra_negative_prompts
+
+                 positive_basic_workloads = remove_empty_str(positive_basic_workloads, default=task_prompt)
+                 negative_basic_workloads = remove_empty_str(negative_basic_workloads, default=task_negative_prompt)
+
+                 tasks.append(dict(
+                     task_seed=task_seed,
+                     task_prompt=task_prompt,
+                     task_negative_prompt=task_negative_prompt,
+                     positive=positive_basic_workloads,
+                     negative=negative_basic_workloads,
+                     expansion='',
+                     c=None,
+                     uc=None,
+                     positive_top_k=len(positive_basic_workloads),
+                     negative_top_k=len(negative_basic_workloads),
+                     log_positive_prompt='\n'.join([task_prompt] + task_extra_positive_prompts),
+                     log_negative_prompt='\n'.join([task_negative_prompt] + task_extra_negative_prompts),
+                 ))
+
+             if use_expansion:
+                 for i, t in enumerate(tasks):
+                     progressbar(async_task, 5, f'Preparing Fooocus text #{i + 1} ...')
+                     expansion = pipeline.final_expansion(t['task_prompt'], t['task_seed'])
+                     print(f'[Prompt Expansion] {expansion}')
+                     t['expansion'] = expansion
+                     t['positive'] = copy.deepcopy(t['positive']) + [expansion]  # Deep copy.
+
+             for i, t in enumerate(tasks):
+                 progressbar(async_task, 7, f'Encoding positive #{i + 1} ...')
+                 t['c'] = pipeline.clip_encode(texts=t['positive'], pool_top_k=t['positive_top_k'])
+
+             for i, t in enumerate(tasks):
+                 if abs(float(cfg_scale) - 1.0) < 1e-4:
+                     t['uc'] = pipeline.clone_cond(t['c'])
+                 else:
+                     progressbar(async_task, 10, f'Encoding negative #{i + 1} ...')
+                     t['uc'] = pipeline.clip_encode(texts=t['negative'], pool_top_k=t['negative_top_k'])
+
+         if len(goals) > 0:
+             progressbar(async_task, 13, 'Image processing ...')
+
+         if 'vary' in goals:
+             if 'subtle' in uov_method:
+                 denoising_strength = 0.5
+             if 'strong' in uov_method:
+                 denoising_strength = 0.85
+             if advanced_parameters.overwrite_vary_strength > 0:
+                 denoising_strength = advanced_parameters.overwrite_vary_strength
+
+             shape_ceil = get_image_shape_ceil(uov_input_image)
+             if shape_ceil < 1024:
+                 print('[Vary] Image is resized because it is too small.')
+                 shape_ceil = 1024
+             elif shape_ceil > 2048:
+                 print('[Vary] Image is resized because it is too big.')
+                 shape_ceil = 2048
+
+             uov_input_image = set_image_shape_ceil(uov_input_image, shape_ceil)
+
+             initial_pixels = core.numpy_to_pytorch(uov_input_image)
+             progressbar(async_task, 13, 'VAE encoding ...')
+
+             candidate_vae, _ = pipeline.get_candidate_vae(
+                 steps=steps,
+                 switch=switch,
+                 denoise=denoising_strength,
+                 refiner_swap_method=refiner_swap_method
+             )
+
+             initial_latent = core.encode_vae(vae=candidate_vae, pixels=initial_pixels)
+             B, C, H, W = initial_latent['samples'].shape
+             width = W * 8
+             height = H * 8
+             print(f'Final resolution is {str((height, width))}.')
+
+         if 'upscale' in goals:
+             H, W, C = uov_input_image.shape
+             progressbar(async_task, 13, f'Upscaling image from {str((H, W))} ...')
+             uov_input_image = perform_upscale(uov_input_image)
+             print('Image upscaled.')
+
+             f = 1.0
+             if upscale_value is not None and upscale_value > 1.0:
+                 f = upscale_value
+             else:
+                 pattern = r"([0-9]+(?:\.[0-9]+)?)x"
+                 matches = re.findall(pattern, uov_method)
+                 if len(matches) > 0:
+                     f_tmp = float(matches[0])
+                     f = 1.0 if f_tmp < 1.0 else 5.0 if f_tmp > 5.0 else f_tmp
+
+             shape_ceil = get_shape_ceil(H * f, W * f)
+
+             if shape_ceil < 1024:
+                 print('[Upscale] Image is resized because it is too small.')
+                 uov_input_image = set_image_shape_ceil(uov_input_image, 1024)
+                 shape_ceil = 1024
+             else:
+                 uov_input_image = resample_image(uov_input_image, width=W * f, height=H * f)
+
+             image_is_super_large = shape_ceil > 2800
+
+             if 'fast' in uov_method:
+                 direct_return = True
+             elif image_is_super_large:
+                 print('Image is too large. Directly returned the SR image. '
+                       'Usually directly return SR image at 4K resolution '
+                       'yields better results than SDXL diffusion.')
+                 direct_return = True
+             else:
+                 direct_return = False
+
+             if direct_return:
+                 d = [('Upscale (Fast)', '2x')]
+                 log(uov_input_image, d)
+                 return yield_result(async_task, uov_input_image, tasks)
+
+             tiled = True
+             denoising_strength = 0.382
+
+             if advanced_parameters.overwrite_upscale_strength > 0:
+                 denoising_strength = advanced_parameters.overwrite_upscale_strength
+
+             initial_pixels = core.numpy_to_pytorch(uov_input_image)
+             progressbar(async_task, 13, 'VAE encoding ...')
+
+             candidate_vae, _ = pipeline.get_candidate_vae(
+                 steps=steps,
+                 switch=switch,
+                 denoise=denoising_strength,
+                 refiner_swap_method=refiner_swap_method
+             )
+
+             initial_latent = core.encode_vae(
+                 vae=candidate_vae,
+                 pixels=initial_pixels, tiled=True)
+             B, C, H, W = initial_latent['samples'].shape
+             width = W * 8
+             height = H * 8
+             print(f'Final resolution is {str((height, width))}.')
+
+         if 'inpaint' in goals:
+             if len(outpaint_selections) > 0:
+                 H, W, C = inpaint_image.shape
+                 if 'top' in outpaint_selections:
+                     distance_top = int(H * 0.3)
+                     if outpaint_distance_top > 0:
+                         distance_top = outpaint_distance_top
+
+                     inpaint_image = np.pad(inpaint_image, [[distance_top, 0], [0, 0], [0, 0]], mode='edge')
+                     inpaint_mask = np.pad(inpaint_mask, [[distance_top, 0], [0, 0]], mode='constant',
+                                           constant_values=255)
+
+                 if 'bottom' in outpaint_selections:
+                     distance_bottom = int(H * 0.3)
+                     if outpaint_distance_bottom > 0:
+                         distance_bottom = outpaint_distance_bottom
+
+                     inpaint_image = np.pad(inpaint_image, [[0, distance_bottom], [0, 0], [0, 0]], mode='edge')
+                     inpaint_mask = np.pad(inpaint_mask, [[0, distance_bottom], [0, 0]], mode='constant',
+                                           constant_values=255)
+
+                 H, W, C = inpaint_image.shape
+                 if 'left' in outpaint_selections:
+                     distance_left = int(W * 0.3)
+                     if outpaint_distance_left > 0:
+                         distance_left = outpaint_distance_left
+
+                     inpaint_image = np.pad(inpaint_image, [[0, 0], [distance_left, 0], [0, 0]], mode='edge')
+                     inpaint_mask = np.pad(inpaint_mask, [[0, 0], [distance_left, 0]], mode='constant',
+                                           constant_values=255)
+
+                 if 'right' in outpaint_selections:
+                     distance_right = int(W * 0.3)
+                     if outpaint_distance_right > 0:
+                         distance_right = outpaint_distance_right
+
+                     inpaint_image = np.pad(inpaint_image, [[0, 0], [0, distance_right], [0, 0]], mode='edge')
+                     inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, distance_right]], mode='constant',
+                                           constant_values=255)
+
+                 inpaint_image = np.ascontiguousarray(inpaint_image.copy())
+                 inpaint_mask = np.ascontiguousarray(inpaint_mask.copy())
+                 advanced_parameters.inpaint_strength = 1.0
+                 advanced_parameters.inpaint_respective_field = 1.0
+
+             denoising_strength = advanced_parameters.inpaint_strength
+
+             inpaint_worker.current_task = inpaint_worker.InpaintWorker(
+                 image=inpaint_image,
+                 mask=inpaint_mask,
+                 use_fill=denoising_strength > 0.99,
+                 k=advanced_parameters.inpaint_respective_field
+             )
+
+             if advanced_parameters.debugging_inpaint_preprocessor:
+                 # yield_result takes (async_task, imgs, tasks); the upstream Fooocus keyword
+                 # do_not_show_finished_images does not exist in this signature
+                 return yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
+                                     tasks)
+
+             progressbar(async_task, 13, 'VAE Inpaint encoding ...')
+
+             inpaint_pixel_fill = core.numpy_to_pytorch(inpaint_worker.current_task.interested_fill)
+             inpaint_pixel_image = core.numpy_to_pytorch(inpaint_worker.current_task.interested_image)
+             inpaint_pixel_mask = core.numpy_to_pytorch(inpaint_worker.current_task.interested_mask)
+
+             candidate_vae, candidate_vae_swap = pipeline.get_candidate_vae(
+                 steps=steps,
+                 switch=switch,
+                 denoise=denoising_strength,
+                 refiner_swap_method=refiner_swap_method
+             )
+
+             latent_inpaint, latent_mask = core.encode_vae_inpaint(
+                 mask=inpaint_pixel_mask,
+                 vae=candidate_vae,
+                 pixels=inpaint_pixel_image)
+
+             latent_swap = None
+             if candidate_vae_swap is not None:
+                 progressbar(async_task, 13, 'VAE SD15 encoding ...')
+                 latent_swap = core.encode_vae(
+                     vae=candidate_vae_swap,
+                     pixels=inpaint_pixel_fill)['samples']
+
+             progressbar(async_task, 13, 'VAE encoding ...')
+             latent_fill = core.encode_vae(
+                 vae=candidate_vae,
+                 pixels=inpaint_pixel_fill)['samples']
+
+             inpaint_worker.current_task.load_latent(
+                 latent_fill=latent_fill, latent_mask=latent_mask, latent_swap=latent_swap)
+
+             if inpaint_parameterized:
+                 pipeline.final_unet = inpaint_worker.current_task.patch(
+                     inpaint_head_model_path=inpaint_head_model_path,
+                     inpaint_latent=latent_inpaint,
+                     inpaint_latent_mask=latent_mask,
+                     model=pipeline.final_unet
+                 )
+
+             if not advanced_parameters.inpaint_disable_initial_latent:
+                 initial_latent = {'samples': latent_fill}
+
+             B, C, H, W = latent_fill.shape
+             height, width = H * 8, W * 8
+             final_height, final_width = inpaint_worker.current_task.image.shape[:2]
+             print(f'Final resolution is {str((final_height, final_width))}, latent is {str((height, width))}.')
+
+         if 'cn' in goals:
+             for task in cn_tasks[flags.cn_canny]:
+                 cn_img, cn_stop, cn_weight = task
+                 cn_img = resize_image(HWC3(cn_img), width=width, height=height)
+
+                 if not advanced_parameters.skipping_cn_preprocessor:
+                     cn_img = preprocessors.canny_pyramid(cn_img)
+
+                 cn_img = HWC3(cn_img)
+                 task[0] = core.numpy_to_pytorch(cn_img)
+                 if advanced_parameters.debugging_cn_preprocessor:
+                     return yield_result(async_task, cn_img, tasks)
+             for task in cn_tasks[flags.cn_cpds]:
+                 cn_img, cn_stop, cn_weight = task
+                 cn_img = resize_image(HWC3(cn_img), width=width, height=height)
+
+                 if not advanced_parameters.skipping_cn_preprocessor:
+                     cn_img = preprocessors.cpds(cn_img)
+
+                 cn_img = HWC3(cn_img)
+                 task[0] = core.numpy_to_pytorch(cn_img)
+                 if advanced_parameters.debugging_cn_preprocessor:
+                     return yield_result(async_task, cn_img, tasks)
+             for task in cn_tasks[flags.cn_ip]:
+                 cn_img, cn_stop, cn_weight = task
+                 cn_img = HWC3(cn_img)
+
+                 # https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
+                 cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
+
+                 task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path)
+                 if advanced_parameters.debugging_cn_preprocessor:
+                     return yield_result(async_task, cn_img, tasks)
+             for task in cn_tasks[flags.cn_ip_face]:
+                 cn_img, cn_stop, cn_weight = task
+                 cn_img = HWC3(cn_img)
+
+                 if not advanced_parameters.skipping_cn_preprocessor:
+                     cn_img = face_crop.crop_image(cn_img)
+
+                 # https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
+                 cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
+
+                 task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path)
+                 if advanced_parameters.debugging_cn_preprocessor:
+                     return yield_result(async_task, cn_img, tasks)
+
+             all_ip_tasks = cn_tasks[flags.cn_ip] + cn_tasks[flags.cn_ip_face]
+
+             if len(all_ip_tasks) > 0:
+                 pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks)
+
+         if advanced_parameters.freeu_enabled:
+             print('FreeU is enabled!')
+             pipeline.final_unet = core.apply_freeu(
+                 pipeline.final_unet,
+                 advanced_parameters.freeu_b1,
+                 advanced_parameters.freeu_b2,
+                 advanced_parameters.freeu_s1,
+                 advanced_parameters.freeu_s2
+             )
+
+         all_steps = steps * image_number
+
+         print(f'[Parameters] Denoising Strength = {denoising_strength}')
+
+         if isinstance(initial_latent, dict) and 'samples' in initial_latent:
+             log_shape = initial_latent['samples'].shape
+         else:
+             log_shape = f'Image Space {(height, width)}'
+
+         print(f'[Parameters] Initial Latent shape: {log_shape}')
+
+         preparation_time = time.perf_counter() - execution_start_time
+         print(f'Preparation time: {preparation_time:.2f} seconds')
+
+         final_sampler_name = sampler_name
+         final_scheduler_name = scheduler_name
+
+         if scheduler_name == 'lcm':
+             final_scheduler_name = 'sgm_uniform'
+             if pipeline.final_unet is not None:
+                 pipeline.final_unet = core.opModelSamplingDiscrete.patch(
+                     pipeline.final_unet,
+                     sampling='lcm',
+                     zsnr=False)[0]
+             if pipeline.final_refiner_unet is not None:
+                 pipeline.final_refiner_unet = core.opModelSamplingDiscrete.patch(
+                     pipeline.final_refiner_unet,
+                     sampling='lcm',
+                     zsnr=False)[0]
+             print('Using lcm scheduler.')
+
+         outputs.append(['preview', (13, 'Moving model to GPU ...', None)])
+
+         def callback(step, x0, x, total_steps, y):
+             done_steps = current_task_id * steps + step
+             outputs.append(['preview', (
+                 int(15.0 + 85.0 * float(done_steps) / float(all_steps)),
+                 f'Step {step}/{total_steps} in the {current_task_id + 1}-th Sampling',
+                 y)])
+
+         for current_task_id, task in enumerate(tasks):
+             execution_start_time = time.perf_counter()
+
+             try:
+                 positive_cond, negative_cond = task['c'], task['uc']
+
+                 if 'cn' in goals:
+                     for cn_flag, cn_path in [
+                         (flags.cn_canny, controlnet_canny_path),
+                         (flags.cn_cpds, controlnet_cpds_path)
+                     ]:
+                         for cn_img, cn_stop, cn_weight in cn_tasks[cn_flag]:
+                             positive_cond, negative_cond = core.apply_controlnet(
+                                 positive_cond, negative_cond,
+                                 pipeline.loaded_ControlNets[cn_path], cn_img, cn_weight, 0, cn_stop)
+
+                 imgs = pipeline.process_diffusion(
+                     positive_cond=positive_cond,
+                     negative_cond=negative_cond,
+                     steps=steps,
+                     switch=switch,
+                     width=width,
+                     height=height,
+                     image_seed=task['task_seed'],
+                     callback=callback,
+                     sampler_name=final_sampler_name,
+                     scheduler_name=final_scheduler_name,
+                     latent=initial_latent,
+                     denoise=denoising_strength,
+                     tiled=tiled,
+                     cfg_scale=cfg_scale,
+                     refiner_swap_method=refiner_swap_method
+                 )
+
+                 del task['c'], task['uc'], positive_cond, negative_cond  # Save memory
+
+                 if inpaint_worker.current_task is not None:
+                     imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]
+
+                 for x in imgs:
+                     d = [
+                         ('Prompt', task['log_positive_prompt']),
+                         ('Negative Prompt', task['log_negative_prompt']),
+                         ('Fooocus V2 Expansion', task['expansion']),
+                         ('Styles', str(raw_style_selections)),
+                         ('Performance', performance_selection),
+                         ('Resolution', str((width, height))),
+                         ('Sharpness', sharpness),
+                         ('Guidance Scale', guidance_scale),
+                         ('ADM Guidance', str((
+                             patch.positive_adm_scale,
+                             patch.negative_adm_scale,
+                             patch.adm_scaler_end))),
+                         ('Base Model', base_model_name),
+                         ('Refiner Model', refiner_model_name),
+                         ('Refiner Switch', refiner_switch),
+                         ('Sampler', sampler_name),
+                         ('Scheduler', scheduler_name),
+                         ('Seed', task['task_seed']),
+                     ]
+                     for n, w in loras:
+                         if n != 'None':
+                             d.append(('LoRA', f'{n} : {w}'))
+                     d.append(('Version', 'v' + fooocus_version.version))
+                     log(x, d)
+
+                 # Fooocus async_worker.py code end
+
+                 results += imgs
+             except model_management.InterruptProcessingException as e:
+                 print("User stopped")
+                 results.append(ImageGenerationResult(
+                     im=None, seed=task['task_seed'], finish_reason=GenerationFinishReason.user_cancel))
+                 async_task.set_result(results, True, str(e))
+                 break
+             except Exception as e:
+                 print('Process error:', e)
+                 results.append(ImageGenerationResult(
+                     im=None, seed=task['task_seed'], finish_reason=GenerationFinishReason.error))
+                 async_task.set_result(results, True, str(e))
+                 break
+
+             execution_time = time.perf_counter() - execution_start_time
+             print(f'Generating and saving time: {execution_time:.2f} seconds')
+
+         if async_task.finish_with_error:
+             task_queue.finish_task(async_task.job_id)
+             return async_task.task_result
+         return yield_result(None, results, tasks)
+     except Exception as e:
+         print('Worker error:', e)
+         logging.exception(e)
+
+         if not async_task.is_finished:
+             task_queue.finish_task(async_task.job_id)
+             async_task.set_result([], True, str(e))
+             print(f"[Task Queue] Finish task with error, job_id={async_task.job_id}")
+         return []
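
Reviewer note: finish_task above POSTs a JSON body of the form {"job_id": ..., "job_result": [{"url": ..., "seed": ...}]} to the configured webhook. A stdlib-only receiver like the sketch below can verify the flow end-to-end; the port and the --webhook-url flag spelling are assumptions (the flag itself is defined in fooocusapi/args.py, which is not shown here).

import json
from http.server import BaseHTTPRequestHandler, HTTPServer

class WebhookHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get('Content-Length', 0))
        payload = json.loads(self.rfile.read(length))
        # Payload shape produced by TaskQueue.finish_task
        print(payload["job_id"], [r["url"] for r in payload["job_result"]])
        self.send_response(200)
        self.end_headers()

HTTPServer(('127.0.0.1', 8090), WebhookHandler).serve_forever()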
Fooocus-API/main.py ADDED
@@ -0,0 +1,419 @@
1
+ import argparse
2
+ import os
3
+ import re
4
+ import shutil
5
+ import subprocess
6
+ import sys
7
+ from importlib.util import find_spec
8
+ from threading import Thread
9
+
10
+ from fooocus_api_version import version
11
+ from fooocusapi.repositories_versions import fooocus_commit_hash
12
+ sys.path.append(os.path.dirname(os.path.realpath(__file__)))
13
+
14
+
15
+ print('[System ARGV] ' + str(sys.argv))
16
+
17
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
18
+ os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
19
+
20
+ python = sys.executable
21
+ default_command_live = True
22
+ index_url = os.environ.get('INDEX_URL', "")
23
+ re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
24
+
25
+ fooocus_name = 'Fooocus'
26
+
27
+ fooocus_gitee_repo = 'https://gitee.com/mirrors/fooocus'
28
+ fooocus_github_repo = 'https://github.com/lllyasviel/Fooocus'
29
+
30
+ modules_path = os.path.dirname(os.path.realpath(__file__))
31
+ script_path = modules_path
32
+ dir_repos = "repositories"
33
+
34
+
35
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
36
+ def onerror(func, path, exc_info):
37
+ import stat
38
+ if not os.access(path, os.W_OK):
39
+ os.chmod(path, stat.S_IWUSR)
40
+ func(path)
41
+ else:
42
+ raise 'Failed to invoke "shutil.rmtree", git management failed.'
43
+
44
+
45
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
46
+ def git_clone(url, dir, name, hash=None):
47
+ import pygit2
48
+
49
+ try:
50
+ try:
51
+ repo = pygit2.Repository(dir)
52
+ remote_url = repo.remotes['origin'].url
53
+ if remote_url not in [fooocus_gitee_repo, fooocus_github_repo]:
54
+ print(f'{name} exists but remote URL will be updated.')
55
+ del repo
56
+ raise url
57
+ else:
58
+ print(f'{name} exists and URL is correct.')
59
+ url = remote_url
60
+ except:
61
+ if os.path.isdir(dir) or os.path.exists(dir):
62
+ print("Fooocus exists, but not a git repo. You can find how to solve this problem here: https://github.com/konieshadow/Fooocus-API#use-exist-fooocus")
63
+ sys.exit(1)
64
+ os.makedirs(dir, exist_ok=True)
65
+ repo = pygit2.clone_repository(url, dir)
66
+ print(f'{name} cloned from {url}.')
67
+
68
+ remote = repo.remotes['origin']
69
+ remote.fetch()
70
+
71
+ commit = repo.get(hash)
72
+
73
+ repo.checkout_tree(commit, strategy=pygit2.GIT_CHECKOUT_FORCE)
74
+ repo.set_head(commit.id)
75
+
76
+ print(f'{name} checkout finished for {hash}.')
77
+ except Exception as e:
78
+ print(f'Git clone failed for {name}: {str(e)}')
79
+ raise e
80
+
81
+
82
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
83
+ def repo_dir(name):
84
+ return os.path.join(script_path, dir_repos, name)
85
+
86
+
87
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
88
+ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str:
89
+ if desc is not None:
90
+ print(desc)
91
+
92
+ run_kwargs = {
93
+ "args": command,
94
+ "shell": True,
95
+ "env": os.environ if custom_env is None else custom_env,
96
+ "encoding": 'utf8',
97
+ "errors": 'ignore',
98
+ }
99
+
100
+ if not live:
101
+ run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
102
+
103
+ result = subprocess.run(**run_kwargs)
104
+
105
+ if result.returncode != 0:
106
+ error_bits = [
107
+ f"{errdesc or 'Error running command'}.",
108
+ f"Command: {command}",
109
+ f"Error code: {result.returncode}",
110
+ ]
111
+ if result.stdout:
112
+ error_bits.append(f"stdout: {result.stdout}")
113
+ if result.stderr:
114
+ error_bits.append(f"stderr: {result.stderr}")
115
+ raise RuntimeError("\n".join(error_bits))
116
+
117
+ return result.stdout or ""
118
+
119
+
120
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
121
+ def run_pip(command, desc=None, live=default_command_live):
122
+ try:
123
+ index_url_line = f' --index-url {index_url}' if index_url != '' else ''
124
+ return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}",
125
+ errdesc=f"Couldn't install {desc}", live=live)
126
+ except Exception as e:
127
+ print(e)
128
+ print(f'CMD Failed {desc}: {command}')
129
+ return None
130
+
131
+
132
+ # This function was copied from [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
133
+ def requirements_met(requirements_file):
134
+ """
135
+ Does a simple parse of a requirements.txt file to determine if all requirements in it
136
+ are already installed. Returns True if so, False if not installed or parsing fails.
137
+ """
138
+
139
+ import importlib.metadata
140
+ import packaging.version
141
+
142
+ with open(requirements_file, "r", encoding="utf8") as file:
143
+ for line in file:
144
+ if line.strip() == "":
145
+ continue
146
+
147
+ m = re.match(re_requirement, line)
148
+ if m is None:
149
+ return False
150
+
151
+ package = m.group(1).strip()
152
+ version_required = (m.group(2) or "").strip()
153
+
154
+ if version_required == "":
155
+ continue
156
+
157
+ try:
158
+ version_installed = importlib.metadata.version(package)
159
+ except Exception:
160
+ return False
161
+
162
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
163
+ return False
164
+
165
+ return True
166
+
167
+
168
+ def download_repositories():
169
+ import pygit2
170
+ import requests
171
+
172
+ pygit2.option(pygit2.GIT_OPT_SET_OWNER_VALIDATION, 0)
173
+
174
+ http_proxy = os.environ.get('HTTP_PROXY')
175
+ https_proxy = os.environ.get('HTTPS_PROXY')
176
+
177
+ if http_proxy is not None:
178
+ print(f"Using http proxy for git clone: {http_proxy}")
179
+ os.environ['http_proxy'] = http_proxy
180
+
181
+ if https_proxy is not None:
182
+ print(f"Using https proxy for git clone: {https_proxy}")
183
+ os.environ['https_proxy'] = https_proxy
184
+
185
+ try:
186
+ requests.get("https://policies.google.com/privacy", timeout=5)
187
+ fooocus_repo_url = fooocus_github_repo
188
+ except:
189
+ fooocus_repo_url = fooocus_gitee_repo
190
+ fooocus_repo = os.environ.get(
191
+ 'FOOOCUS_REPO', fooocus_repo_url)
192
+ git_clone(fooocus_repo, repo_dir(fooocus_name),
193
+ "Fooocus", fooocus_commit_hash)
194
+
195
+
196
+ def is_installed(package):
197
+ try:
198
+ spec = find_spec(package)
199
+ except ModuleNotFoundError:
200
+ return False
201
+
202
+ return spec is not None
203
+
204
+
205
+ def download_models():
206
+ vae_approx_filenames = [
207
+ ('xlvaeapp.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
208
+ ('vaeapp_sd15.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt'),
209
+ ('xl-to-v1_interposer-v3.1.safetensors',
210
+ 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
211
+ ]
212
+
213
+ from modules.model_loader import load_file_from_url
214
+ from modules.config import (path_checkpoints as modelfile_path,
215
+ path_loras as lorafile_path,
216
+ path_vae_approx as vae_approx_path,
217
+ path_fooocus_expansion as fooocus_expansion_path,
218
+ checkpoint_downloads,
219
+ path_embeddings as embeddings_path,
220
+ embeddings_downloads, lora_downloads)
221
+
222
+ for file_name, url in checkpoint_downloads.items():
223
+ load_file_from_url(url=url, model_dir=modelfile_path, file_name=file_name)
224
+ for file_name, url in embeddings_downloads.items():
225
+ load_file_from_url(url=url, model_dir=embeddings_path, file_name=file_name)
226
+ for file_name, url in lora_downloads.items():
227
+ load_file_from_url(url=url, model_dir=lorafile_path, file_name=file_name)
228
+ for file_name, url in vae_approx_filenames:
229
+ load_file_from_url(url=url, model_dir=vae_approx_path, file_name=file_name)
230
+
231
+ load_file_from_url(
232
+ url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
233
+ model_dir=fooocus_expansion_path,
234
+ file_name='pytorch_model.bin'
235
+ )
236
+
237
+
+ def install_dependents(args):
+     if not args.skip_pip:
+         torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu121")
+
+         # Check whether a pip install is needed
+         requirements_file = 'requirements.txt'
+         if not requirements_met(requirements_file):
+             run_pip(f"install -r \"{requirements_file}\"", "requirements")
+
+         if not is_installed("torch") or not is_installed("torchvision"):
+             print(f"torch_index_url: {torch_index_url}")
+             run_pip(f"install torch==2.1.0 torchvision==0.16.0 --extra-index-url {torch_index_url}", "torch")
+
+     skip_sync_repo = False
+     if args.sync_repo is not None:
+         if args.sync_repo == 'only':
+             print("Only download and sync dependent repositories")
+             download_repositories()
+             models_path = os.path.join(
+                 script_path, dir_repos, fooocus_name, "models")
+             print(
+                 f"Sync repositories successful. Now you can put model files in subdirectories of '{models_path}'")
+             return False
+         elif args.sync_repo == 'skip':
+             skip_sync_repo = True
+         else:
+             print(
+                 "Invalid value for argument '--sync-repo'; acceptable values are 'skip' and 'only'")
+             sys.exit(1)
+
+     if not skip_sync_repo:
+         download_repositories()
+
+     # Add dependent repositories to the import path
+     sys.path.append(script_path)
+     fooocus_path = os.path.join(script_path, dir_repos, fooocus_name)
+     sys.path.append(fooocus_path)
+     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+
+
+ def prepare_environments(args) -> bool:
+     import fooocusapi.worker as worker
+     worker.task_queue.queue_size = args.queue_size
+     worker.task_queue.history_size = args.queue_history
+     worker.task_queue.webhook_url = args.webhook_url
+     print(f"[Fooocus-API] Task queue size: {args.queue_size}, queue history size: {args.queue_history}, webhook url: {args.webhook_url}")
+
+     if args.gpu_device_id is not None:
+         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
+         print("Set device to:", args.gpu_device_id)
+
+     if args.base_url is None or len(args.base_url.strip()) == 0:
+         host = args.host
+         if host == '0.0.0.0':
+             host = '127.0.0.1'
+         args.base_url = f"http://{host}:{args.port}"
+
+     sys.argv = [sys.argv[0]]
+
+     if args.preset is not None:
+         # Remove any stale preset folder and copy the one from the Fooocus repo
+         origin_preset_folder = os.path.abspath(os.path.join(script_path, dir_repos, fooocus_name, 'presets'))
+         preset_folder = os.path.abspath(os.path.join(script_path, 'presets'))
+         if os.path.exists(preset_folder):
+             shutil.rmtree(preset_folder)
+         shutil.copytree(origin_preset_folder, preset_folder)
+
+     import modules.config as config
+     import fooocusapi.parameters as parameters
+     parameters.default_inpaint_engine_version = config.default_inpaint_engine_version
+     parameters.default_styles = config.default_styles
+     parameters.default_base_model_name = config.default_base_model_name
+     parameters.default_refiner_model_name = config.default_refiner_model_name
+     parameters.default_refiner_switch = config.default_refiner_switch
+     parameters.default_loras = config.default_loras
+     parameters.default_cfg_scale = config.default_cfg_scale
+     parameters.default_prompt_negative = config.default_prompt_negative
+     parameters.default_aspect_ratio = parameters.get_aspect_ratio_value(config.default_aspect_ratio)
+     parameters.available_aspect_ratios = [parameters.get_aspect_ratio_value(a) for a in config.available_aspect_ratios]
+
+     ini_cbh_args()
+
+     download_models()
+
+     if args.preload_pipeline:
+         preload_pipeline()
+
+     return True
+
+
+ def pre_setup(skip_sync_repo: bool = False,
+               disable_private_log: bool = False,
+               skip_pip: bool = False,
+               load_all_models: bool = False,
+               preload_pipeline: bool = False,
+               always_gpu: bool = False,
+               all_in_fp16: bool = False,
+               preset: str | None = None):
+     class Args(object):
+         host = '127.0.0.1'
+         port = 8888
+         base_url = None
+         sync_repo = None
+         disable_private_log = False
+         skip_pip = False
+         preload_pipeline = False
+         queue_size = 3
+         queue_history = 0
+         preset = None
+         always_gpu = False
+         all_in_fp16 = False
+         gpu_device_id = None
+
+     print("[Pre Setup] Prepare environments")
+
+     args = Args()
+     if skip_sync_repo:
+         args.sync_repo = 'skip'
+     args.disable_private_log = disable_private_log
+     args.skip_pip = skip_pip
+     args.preload_pipeline = preload_pipeline
+     args.always_gpu = always_gpu
+     args.all_in_fp16 = all_in_fp16
+     args.preset = preset
+
+     sys.argv = [sys.argv[0]]
+     if args.preset is not None:
+         sys.argv.append('--preset')
+         sys.argv.append(args.preset)
+
+     install_dependents(args)
+
+     import fooocusapi.args as _
+     prepare_environments(args)
+
+     if load_all_models:
+         import modules.config as config
+         from fooocusapi.parameters import default_inpaint_engine_version
+         config.downloading_upscale_model()
+         config.downloading_inpaint_models(default_inpaint_engine_version)
+         config.downloading_controlnet_canny()
+         config.downloading_controlnet_cpds()
+         config.downloading_ip_adapters()
+     print("[Pre Setup] Finished")
+
+
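`pre_setup` is the programmatic entry point for embedding Fooocus-API in another Python process. A minimal usage sketch, mirroring what `Predictor.setup` in predict.py below does:

    from main import pre_setup

    # Prepare repositories, models and the task queue without starting the
    # HTTP server.
    pre_setup(disable_private_log=True, skip_pip=True, preload_pipeline=True, preset=None)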
+ # This function was copied from the [Fooocus](https://github.com/lllyasviel/Fooocus) repository.
+ def ini_cbh_args():
+     from args_manager import args
+     return args
+
+
+ def preload_pipeline():
+     print("Preload pipeline")
+     import modules.default_pipeline as _
+
+
+ if __name__ == "__main__":
+     print(f"Python {sys.version}")
+     print(f"Fooocus-API version: {version}")
+
+     from fooocusapi.base_args import add_base_args
+
+     parser = argparse.ArgumentParser()
+     add_base_args(parser, True)
+
+     args, _ = parser.parse_known_args()
+     install_dependents(args)
+
+     from fooocusapi.args import args
+
+     if prepare_environments(args):
+         sys.argv = [sys.argv[0]]
+
+         # Load the pipeline in a background thread
+         t = Thread(target=preload_pipeline, daemon=True)
+         t.start()
+
+         # Start the API server
+         from fooocusapi.api import start_app
+
+         start_app(args)
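With the server running, the API can be smoke-tested over HTTP. A hypothetical client call, assuming the defaults above (127.0.0.1:8888); the text-to-image route is taken from the project README and should be treated as an assumption here:

    import requests

    # POST a minimal text-to-image job; require_base64=False asks for file
    # URLs rather than inline base64 image data.
    resp = requests.post(
        "http://127.0.0.1:8888/v1/generation/text-to-image",
        json={"prompt": "a photo of a forest", "require_base64": False},
        timeout=600,
    )
    print(resp.status_code, resp.json())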
Fooocus-API/predict.py ADDED
@@ -0,0 +1,205 @@
+ # Prediction interface for Cog ⚙️
+ # https://github.com/replicate/cog/blob/main/docs/python.md
+
+ import os
+ import numpy as np
+
+ from PIL import Image
+ from typing import List
+ from cog import BasePredictor, Input, Path
+ from fooocusapi.worker import process_generate, task_queue
+ from fooocusapi.file_utils import output_dir
+ from fooocusapi.parameters import (GenerationFinishReason,
+                                    ImageGenerationParams,
+                                    available_aspect_ratios,
+                                    uov_methods,
+                                    outpaint_expansions,
+                                    default_styles,
+                                    default_base_model_name,
+                                    default_refiner_model_name,
+                                    default_loras,
+                                    default_refiner_switch,
+                                    default_cfg_scale,
+                                    default_prompt_negative)
+ from fooocusapi.task_queue import TaskType
+
+
+ class Predictor(BasePredictor):
+     def setup(self) -> None:
+         """Load the model into memory to make running multiple predictions efficient"""
+         from main import pre_setup
+         pre_setup(disable_private_log=True, skip_pip=True, preload_pipeline=True, preset=None)
+
+     def predict(
+         self,
+         prompt: str = Input(default='', description="Prompt for image generation"),
+         negative_prompt: str = Input(default=default_prompt_negative,
+                                      description="Negative prompt for image generation"),
+         style_selections: str = Input(default=','.join(default_styles),
+                                       description="Fooocus styles applied for image generation, separated by commas"),
+         performance_selection: str = Input(default='Speed',
+                                            description="Performance selection", choices=['Speed', 'Quality', 'Extreme Speed']),
+         aspect_ratios_selection: str = Input(default='1152*896',
+                                              description="The generated image's size", choices=available_aspect_ratios),
+         image_number: int = Input(default=1,
+                                   description="How many images to generate", ge=1, le=8),
+         image_seed: int = Input(default=-1,
+                                 description="Seed for image generation, -1 for random"),
+         sharpness: float = Input(default=2.0, ge=0.0, le=30.0),
+         guidance_scale: float = Input(default=default_cfg_scale, ge=1.0, le=30.0),
+         refiner_switch: float = Input(default=default_refiner_switch, ge=0.1, le=1.0),
+         uov_input_image: Path = Input(default=None,
+                                       description="Input image for upscale or variation, keep None for no upscale or variation"),
+         uov_method: str = Input(default='Disabled', choices=uov_methods),
+         uov_upscale_value: float = Input(default=0, description="Only used when uov_method is Upscale (Custom)"),
+         inpaint_additional_prompt: str = Input(default='', description="Additional prompt for inpainting"),
+         inpaint_input_image: Path = Input(default=None,
+                                           description="Input image for inpaint or outpaint, keep None for no inpaint or outpaint. Note that `uov_input_image` takes priority when it is not None."),
+         inpaint_input_mask: Path = Input(default=None,
+                                          description="Input mask for inpaint"),
+         outpaint_selections: str = Input(default='',
+                                          description="Outpaint expansion selections, literal 'Left', 'Right', 'Top', 'Bottom' separated by commas"),
+         outpaint_distance_left: int = Input(default=0,
+                                             description="Outpaint expansion distance from the left of the image"),
+         outpaint_distance_top: int = Input(default=0,
+                                            description="Outpaint expansion distance from the top of the image"),
+         outpaint_distance_right: int = Input(default=0,
+                                              description="Outpaint expansion distance from the right of the image"),
+         outpaint_distance_bottom: int = Input(default=0,
+                                               description="Outpaint expansion distance from the bottom of the image"),
+         cn_img1: Path = Input(default=None,
+                               description="Input image for image prompt. If all cn_img[n] are None, image prompt will not be applied."),
+         cn_stop1: float = Input(default=None, ge=0, le=1,
+                                 description="Stop-at value for image prompt, None for default value"),
+         cn_weight1: float = Input(default=None, ge=0, le=2,
+                                   description="Weight for image prompt, None for default value"),
+         cn_type1: str = Input(default='ImagePrompt', description="ControlNet type for image prompt",
+                               choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
+         cn_img2: Path = Input(default=None,
+                               description="Input image for image prompt. If all cn_img[n] are None, image prompt will not be applied."),
+         cn_stop2: float = Input(default=None, ge=0, le=1,
+                                 description="Stop-at value for image prompt, None for default value"),
+         cn_weight2: float = Input(default=None, ge=0, le=2,
+                                   description="Weight for image prompt, None for default value"),
+         cn_type2: str = Input(default='ImagePrompt', description="ControlNet type for image prompt",
+                               choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
+         cn_img3: Path = Input(default=None,
+                               description="Input image for image prompt. If all cn_img[n] are None, image prompt will not be applied."),
+         cn_stop3: float = Input(default=None, ge=0, le=1,
+                                 description="Stop-at value for image prompt, None for default value"),
+         cn_weight3: float = Input(default=None, ge=0, le=2,
+                                   description="Weight for image prompt, None for default value"),
+         cn_type3: str = Input(default='ImagePrompt', description="ControlNet type for image prompt",
+                               choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
+         cn_img4: Path = Input(default=None,
+                               description="Input image for image prompt. If all cn_img[n] are None, image prompt will not be applied."),
+         cn_stop4: float = Input(default=None, ge=0, le=1,
+                                 description="Stop-at value for image prompt, None for default value"),
+         cn_weight4: float = Input(default=None, ge=0, le=2,
+                                   description="Weight for image prompt, None for default value"),
+         cn_type4: str = Input(default='ImagePrompt', description="ControlNet type for image prompt",
+                               choices=['ImagePrompt', 'FaceSwap', 'PyraCanny', 'CPDS']),
+     ) -> List[Path]:
+         """Run a single prediction on the model"""
+         import modules.flags as flags
+         from modules.sdxl_styles import legal_style_names
+
+         base_model_name = default_base_model_name
+         refiner_model_name = default_refiner_model_name
+         loras = default_loras
+
+         style_selections_arr = []
+         for s in style_selections.strip().split(','):
+             style = s.strip()
+             if style in legal_style_names:
+                 style_selections_arr.append(style)
+
+         if uov_input_image is not None:
+             im = Image.open(str(uov_input_image))
+             uov_input_image = np.array(im)
+
+         inpaint_input_image_dict = None
+         if inpaint_input_image is not None:
+             im = Image.open(str(inpaint_input_image))
+             inpaint_input_image = np.array(im)
+
+             if inpaint_input_mask is not None:
+                 im = Image.open(str(inpaint_input_mask))
+                 inpaint_input_mask = np.array(im)
+
+             inpaint_input_image_dict = {
+                 'image': inpaint_input_image,
+                 'mask': inpaint_input_mask
+             }
+
+         outpaint_selections_arr = []
+         for e in outpaint_selections.strip().split(','):
+             expansion = e.strip()
+             if expansion in outpaint_expansions:
+                 outpaint_selections_arr.append(expansion)
+
+         image_prompts = []
+         image_prompt_config = [(cn_img1, cn_stop1, cn_weight1, cn_type1), (cn_img2, cn_stop2, cn_weight2, cn_type2),
+                                (cn_img3, cn_stop3, cn_weight3, cn_type3), (cn_img4, cn_stop4, cn_weight4, cn_type4)]
+         for config in image_prompt_config:
+             cn_img, cn_stop, cn_weight, cn_type = config
+             if cn_img is not None:
+                 im = Image.open(str(cn_img))
+                 cn_img = np.array(im)
+                 # Fall back to the per-type defaults when stop/weight are omitted
+                 if cn_stop is None:
+                     cn_stop = flags.default_parameters[cn_type][0]
+                 if cn_weight is None:
+                     cn_weight = flags.default_parameters[cn_type][1]
+                 image_prompts.append((cn_img, cn_stop, cn_weight, cn_type))
+
+         advanced_params = None
+
+         params = ImageGenerationParams(prompt=prompt,
+                                        negative_prompt=negative_prompt,
+                                        style_selections=style_selections_arr,
+                                        performance_selection=performance_selection,
+                                        aspect_ratios_selection=aspect_ratios_selection,
+                                        image_number=image_number,
+                                        image_seed=image_seed,
+                                        sharpness=sharpness,
+                                        guidance_scale=guidance_scale,
+                                        base_model_name=base_model_name,
+                                        refiner_model_name=refiner_model_name,
+                                        refiner_switch=refiner_switch,
+                                        loras=loras,
+                                        uov_input_image=uov_input_image,
+                                        uov_method=uov_method,
+                                        upscale_value=uov_upscale_value,
+                                        outpaint_selections=outpaint_selections_arr,
+                                        inpaint_input_image=inpaint_input_image_dict,
+                                        image_prompts=image_prompts,
+                                        advanced_params=advanced_params,
+                                        inpaint_additional_prompt=inpaint_additional_prompt,
+                                        outpaint_distance_left=outpaint_distance_left,
+                                        outpaint_distance_top=outpaint_distance_top,
+                                        outpaint_distance_right=outpaint_distance_right,
+                                        outpaint_distance_bottom=outpaint_distance_bottom
+                                        )
+
+         print(f"[Predictor Predict] Params: {params.__dict__}")
+
+         queue_task = task_queue.add_task(TaskType.text_2_img, {'params': params.__dict__, 'require_base64': False})
+         if queue_task is None:
+             print("[Task Queue] The task queue has reached its limit")
+             raise Exception("The task queue has reached its limit.")
+         results = process_generate(queue_task, params)
+
+         output_paths: List[Path] = []
+         for r in results:
+             if r.finish_reason == GenerationFinishReason.success and r.im is not None:
+                 output_paths.append(Path(os.path.join(output_dir, r.im)))
+
+         print(f"[Predictor Predict] Finished with {len(output_paths)} images")
+
+         if len(output_paths) == 0:
+             raise Exception("Process failed.")
+
+         return output_paths
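For a local test of this predictor, the Cog CLI can drive it directly. A sketch assuming `cog` is installed and the repository's cog.yaml points at this Predictor (the input values are illustrative):

    import subprocess

    # `cog predict -i name=value` feeds the Input parameters defined above.
    subprocess.run(
        ["cog", "predict",
         "-i", "prompt=a photo of a forest",
         "-i", "performance_selection=Speed",
         "-i", "image_number=1"],
        check=True,
    )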
Fooocus-API/requirements.txt ADDED
@@ -0,0 +1,22 @@
+ torchsde==0.2.5
+ einops==0.4.1
+ transformers==4.30.2
+ safetensors==0.3.1
+ accelerate==0.21.0
+ pyyaml==6.0
+ Pillow==9.2.0
+ scipy==1.9.3
+ tqdm==4.64.1
+ psutil==5.9.5
+ pytorch_lightning==1.9.4
+ omegaconf==2.2.3
+ pygit2==1.12.2
+ opencv-contrib-python==4.8.0.74
+ onnxruntime==1.16.3
+ timm==0.9.2
+ fastapi==0.103.1
+ pydantic==2.4.2
+ pydantic_core==2.10.1
+ python-multipart==0.0.6
+ uvicorn[standard]==0.23.2
+ sqlalchemy