BlackBeenie committed
Commit 09b19a1
1 Parent(s): 9340a94

feat: Initial commit

.gitattributes CHANGED
@@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+clip/bpe_simple_vocab_16e6.txt.gz filter=lfs diff=lfs merge=lfs -text
+docs/finaloutput.png filter=lfs diff=lfs merge=lfs -text
+gfpgan/weights/detection_Resnet50_Final.pth filter=lfs diff=lfs merge=lfs -text
+gfpgan/weights/parsing_parsenet.pth filter=lfs diff=lfs merge=lfs -text
+models/CLIP filter=lfs diff=lfs merge=lfs -text
+models/CodeFormer filter=lfs diff=lfs merge=lfs -text
+models/DMDNet.pth filter=lfs diff=lfs merge=lfs -text
+models/GFPGANv1.4.pth filter=lfs diff=lfs merge=lfs -text
+models/inswapper_128.onnx filter=lfs diff=lfs merge=lfs -text
+mynewshinyroop/Lib/site-packages/_virtualenv.pth filter=lfs diff=lfs merge=lfs -text
+models/CLIP/rd64-uni-refined.pth filter=lfs diff=lfs merge=lfs -text
+models/CodeFormer/facelib/detection_Resnet50_Final.pth filter=lfs diff=lfs merge=lfs -text
+models/CodeFormer/realesrgan/RealESRGAN_x2plus.pth filter=lfs diff=lfs merge=lfs -text
+models/CodeFormer/codeformer.pth filter=lfs diff=lfs merge=lfs -text
+models/CodeFormer/facelib/parsing_parsenet.pth filter=lfs diff=lfs merge=lfs -text
+mynewshinyroop/Lib/site-packages/distutils-precedence.pth filter=lfs diff=lfs merge=lfs -text
+roop-unleashed.ipynb filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,5 @@
+__pycache__
+**/__pycache__
+
+.DS_Store
+
Dockerfile ADDED
@@ -0,0 +1,7 @@
+FROM python:3.11
+WORKDIR /usr/src/app
+RUN apt-get update && apt-get install -y libgl1-mesa-glx
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
+CMD ["python", "run.py"]
LICENSE ADDED
@@ -0,0 +1,661 @@
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU Affero General Public License is a free, copyleft license for
11
+ software and other kinds of works, specifically designed to ensure
12
+ cooperation with the community in the case of network server software.
13
+
14
+ The licenses for most software and other practical works are designed
15
+ to take away your freedom to share and change the works. By contrast,
16
+ our General Public Licenses are intended to guarantee your freedom to
17
+ share and change all versions of a program--to make sure it remains free
18
+ software for all its users.
19
+
20
+ When we speak of free software, we are referring to freedom, not
21
+ price. Our General Public Licenses are designed to make sure that you
22
+ have the freedom to distribute copies of free software (and charge for
23
+ them if you wish), that you receive source code or can get it if you
24
+ want it, that you can change the software or use pieces of it in new
25
+ free programs, and that you know you can do these things.
26
+
27
+ Developers that use our General Public Licenses protect your rights
28
+ with two steps: (1) assert copyright on the software, and (2) offer
29
+ you this License which gives you legal permission to copy, distribute
30
+ and/or modify the software.
31
+
32
+ A secondary benefit of defending all users' freedom is that
33
+ improvements made in alternate versions of the program, if they
34
+ receive widespread use, become available for other developers to
35
+ incorporate. Many developers of free software are heartened and
36
+ encouraged by the resulting cooperation. However, in the case of
37
+ software used on network servers, this result may fail to come about.
38
+ The GNU General Public License permits making a modified version and
39
+ letting the public access it on a server without ever releasing its
40
+ source code to the public.
41
+
42
+ The GNU Affero General Public License is designed specifically to
43
+ ensure that, in such cases, the modified source code becomes available
44
+ to the community. It requires the operator of a network server to
45
+ provide the source code of the modified version running there to the
46
+ users of that server. Therefore, public use of a modified version, on
47
+ a publicly accessible server, gives the public access to the source
48
+ code of the modified version.
49
+
50
+ An older license, called the Affero General Public License and
51
+ published by Affero, was designed to accomplish similar goals. This is
52
+ a different license, not a version of the Affero GPL, but Affero has
53
+ released a new version of the Affero GPL which permits relicensing under
54
+ this license.
55
+
56
+ The precise terms and conditions for copying, distribution and
57
+ modification follow.
58
+
59
+ TERMS AND CONDITIONS
60
+
61
+ 0. Definitions.
62
+
63
+ "This License" refers to version 3 of the GNU Affero General Public License.
64
+
65
+ "Copyright" also means copyright-like laws that apply to other kinds of
66
+ works, such as semiconductor masks.
67
+
68
+ "The Program" refers to any copyrightable work licensed under this
69
+ License. Each licensee is addressed as "you". "Licensees" and
70
+ "recipients" may be individuals or organizations.
71
+
72
+ To "modify" a work means to copy from or adapt all or part of the work
73
+ in a fashion requiring copyright permission, other than the making of an
74
+ exact copy. The resulting work is called a "modified version" of the
75
+ earlier work or a work "based on" the earlier work.
76
+
77
+ A "covered work" means either the unmodified Program or a work based
78
+ on the Program.
79
+
80
+ To "propagate" a work means to do anything with it that, without
81
+ permission, would make you directly or secondarily liable for
82
+ infringement under applicable copyright law, except executing it on a
83
+ computer or modifying a private copy. Propagation includes copying,
84
+ distribution (with or without modification), making available to the
85
+ public, and in some countries other activities as well.
86
+
87
+ To "convey" a work means any kind of propagation that enables other
88
+ parties to make or receive copies. Mere interaction with a user through
89
+ a computer network, with no transfer of a copy, is not conveying.
90
+
91
+ An interactive user interface displays "Appropriate Legal Notices"
92
+ to the extent that it includes a convenient and prominently visible
93
+ feature that (1) displays an appropriate copyright notice, and (2)
94
+ tells the user that there is no warranty for the work (except to the
95
+ extent that warranties are provided), that licensees may convey the
96
+ work under this License, and how to view a copy of this License. If
97
+ the interface presents a list of user commands or options, such as a
98
+ menu, a prominent item in the list meets this criterion.
99
+
100
+ 1. Source Code.
101
+
102
+ The "source code" for a work means the preferred form of the work
103
+ for making modifications to it. "Object code" means any non-source
104
+ form of a work.
105
+
106
+ A "Standard Interface" means an interface that either is an official
107
+ standard defined by a recognized standards body, or, in the case of
108
+ interfaces specified for a particular programming language, one that
109
+ is widely used among developers working in that language.
110
+
111
+ The "System Libraries" of an executable work include anything, other
112
+ than the work as a whole, that (a) is included in the normal form of
113
+ packaging a Major Component, but which is not part of that Major
114
+ Component, and (b) serves only to enable use of the work with that
115
+ Major Component, or to implement a Standard Interface for which an
116
+ implementation is available to the public in source code form. A
117
+ "Major Component", in this context, means a major essential component
118
+ (kernel, window system, and so on) of the specific operating system
119
+ (if any) on which the executable work runs, or a compiler used to
120
+ produce the work, or an object code interpreter used to run it.
121
+
122
+ The "Corresponding Source" for a work in object code form means all
123
+ the source code needed to generate, install, and (for an executable
124
+ work) run the object code and to modify the work, including scripts to
125
+ control those activities. However, it does not include the work's
126
+ System Libraries, or general-purpose tools or generally available free
127
+ programs which are used unmodified in performing those activities but
128
+ which are not part of the work. For example, Corresponding Source
129
+ includes interface definition files associated with source files for
130
+ the work, and the source code for shared libraries and dynamically
131
+ linked subprograms that the work is specifically designed to require,
132
+ such as by intimate data communication or control flow between those
133
+ subprograms and other parts of the work.
134
+
135
+ The Corresponding Source need not include anything that users
136
+ can regenerate automatically from other parts of the Corresponding
137
+ Source.
138
+
139
+ The Corresponding Source for a work in source code form is that
140
+ same work.
141
+
142
+ 2. Basic Permissions.
143
+
144
+ All rights granted under this License are granted for the term of
145
+ copyright on the Program, and are irrevocable provided the stated
146
+ conditions are met. This License explicitly affirms your unlimited
147
+ permission to run the unmodified Program. The output from running a
148
+ covered work is covered by this License only if the output, given its
149
+ content, constitutes a covered work. This License acknowledges your
150
+ rights of fair use or other equivalent, as provided by copyright law.
151
+
152
+ You may make, run and propagate covered works that you do not
153
+ convey, without conditions so long as your license otherwise remains
154
+ in force. You may convey covered works to others for the sole purpose
155
+ of having them make modifications exclusively for you, or provide you
156
+ with facilities for running those works, provided that you comply with
157
+ the terms of this License in conveying all material for which you do
158
+ not control copyright. Those thus making or running the covered works
159
+ for you must do so exclusively on your behalf, under your direction
160
+ and control, on terms that prohibit them from making any copies of
161
+ your copyrighted material outside their relationship with you.
162
+
163
+ Conveying under any other circumstances is permitted solely under
164
+ the conditions stated below. Sublicensing is not allowed; section 10
165
+ makes it unnecessary.
166
+
167
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168
+
169
+ No covered work shall be deemed part of an effective technological
170
+ measure under any applicable law fulfilling obligations under article
171
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172
+ similar laws prohibiting or restricting circumvention of such
173
+ measures.
174
+
175
+ When you convey a covered work, you waive any legal power to forbid
176
+ circumvention of technological measures to the extent such circumvention
177
+ is effected by exercising rights under this License with respect to
178
+ the covered work, and you disclaim any intention to limit operation or
179
+ modification of the work as a means of enforcing, against the work's
180
+ users, your or third parties' legal rights to forbid circumvention of
181
+ technological measures.
182
+
183
+ 4. Conveying Verbatim Copies.
184
+
185
+ You may convey verbatim copies of the Program's source code as you
186
+ receive it, in any medium, provided that you conspicuously and
187
+ appropriately publish on each copy an appropriate copyright notice;
188
+ keep intact all notices stating that this License and any
189
+ non-permissive terms added in accord with section 7 apply to the code;
190
+ keep intact all notices of the absence of any warranty; and give all
191
+ recipients a copy of this License along with the Program.
192
+
193
+ You may charge any price or no price for each copy that you convey,
194
+ and you may offer support or warranty protection for a fee.
195
+
196
+ 5. Conveying Modified Source Versions.
197
+
198
+ You may convey a work based on the Program, or the modifications to
199
+ produce it from the Program, in the form of source code under the
200
+ terms of section 4, provided that you also meet all of these conditions:
201
+
202
+ a) The work must carry prominent notices stating that you modified
203
+ it, and giving a relevant date.
204
+
205
+ b) The work must carry prominent notices stating that it is
206
+ released under this License and any conditions added under section
207
+ 7. This requirement modifies the requirement in section 4 to
208
+ "keep intact all notices".
209
+
210
+ c) You must license the entire work, as a whole, under this
211
+ License to anyone who comes into possession of a copy. This
212
+ License will therefore apply, along with any applicable section 7
213
+ additional terms, to the whole of the work, and all its parts,
214
+ regardless of how they are packaged. This License gives no
215
+ permission to license the work in any other way, but it does not
216
+ invalidate such permission if you have separately received it.
217
+
218
+ d) If the work has interactive user interfaces, each must display
219
+ Appropriate Legal Notices; however, if the Program has interactive
220
+ interfaces that do not display Appropriate Legal Notices, your
221
+ work need not make them do so.
222
+
223
+ A compilation of a covered work with other separate and independent
224
+ works, which are not by their nature extensions of the covered work,
225
+ and which are not combined with it such as to form a larger program,
226
+ in or on a volume of a storage or distribution medium, is called an
227
+ "aggregate" if the compilation and its resulting copyright are not
228
+ used to limit the access or legal rights of the compilation's users
229
+ beyond what the individual works permit. Inclusion of a covered work
230
+ in an aggregate does not cause this License to apply to the other
231
+ parts of the aggregate.
232
+
233
+ 6. Conveying Non-Source Forms.
234
+
235
+ You may convey a covered work in object code form under the terms
236
+ of sections 4 and 5, provided that you also convey the
237
+ machine-readable Corresponding Source under the terms of this License,
238
+ in one of these ways:
239
+
240
+ a) Convey the object code in, or embodied in, a physical product
241
+ (including a physical distribution medium), accompanied by the
242
+ Corresponding Source fixed on a durable physical medium
243
+ customarily used for software interchange.
244
+
245
+ b) Convey the object code in, or embodied in, a physical product
246
+ (including a physical distribution medium), accompanied by a
247
+ written offer, valid for at least three years and valid for as
248
+ long as you offer spare parts or customer support for that product
249
+ model, to give anyone who possesses the object code either (1) a
250
+ copy of the Corresponding Source for all the software in the
251
+ product that is covered by this License, on a durable physical
252
+ medium customarily used for software interchange, for a price no
253
+ more than your reasonable cost of physically performing this
254
+ conveying of source, or (2) access to copy the
255
+ Corresponding Source from a network server at no charge.
256
+
257
+ c) Convey individual copies of the object code with a copy of the
258
+ written offer to provide the Corresponding Source. This
259
+ alternative is allowed only occasionally and noncommercially, and
260
+ only if you received the object code with such an offer, in accord
261
+ with subsection 6b.
262
+
263
+ d) Convey the object code by offering access from a designated
264
+ place (gratis or for a charge), and offer equivalent access to the
265
+ Corresponding Source in the same way through the same place at no
266
+ further charge. You need not require recipients to copy the
267
+ Corresponding Source along with the object code. If the place to
268
+ copy the object code is a network server, the Corresponding Source
269
+ may be on a different server (operated by you or a third party)
270
+ that supports equivalent copying facilities, provided you maintain
271
+ clear directions next to the object code saying where to find the
272
+ Corresponding Source. Regardless of what server hosts the
273
+ Corresponding Source, you remain obligated to ensure that it is
274
+ available for as long as needed to satisfy these requirements.
275
+
276
+ e) Convey the object code using peer-to-peer transmission, provided
277
+ you inform other peers where the object code and Corresponding
278
+ Source of the work are being offered to the general public at no
279
+ charge under subsection 6d.
280
+
281
+ A separable portion of the object code, whose source code is excluded
282
+ from the Corresponding Source as a System Library, need not be
283
+ included in conveying the object code work.
284
+
285
+ A "User Product" is either (1) a "consumer product", which means any
286
+ tangible personal property which is normally used for personal, family,
287
+ or household purposes, or (2) anything designed or sold for incorporation
288
+ into a dwelling. In determining whether a product is a consumer product,
289
+ doubtful cases shall be resolved in favor of coverage. For a particular
290
+ product received by a particular user, "normally used" refers to a
291
+ typical or common use of that class of product, regardless of the status
292
+ of the particular user or of the way in which the particular user
293
+ actually uses, or expects or is expected to use, the product. A product
294
+ is a consumer product regardless of whether the product has substantial
295
+ commercial, industrial or non-consumer uses, unless such uses represent
296
+ the only significant mode of use of the product.
297
+
298
+ "Installation Information" for a User Product means any methods,
299
+ procedures, authorization keys, or other information required to install
300
+ and execute modified versions of a covered work in that User Product from
301
+ a modified version of its Corresponding Source. The information must
302
+ suffice to ensure that the continued functioning of the modified object
303
+ code is in no case prevented or interfered with solely because
304
+ modification has been made.
305
+
306
+ If you convey an object code work under this section in, or with, or
307
+ specifically for use in, a User Product, and the conveying occurs as
308
+ part of a transaction in which the right of possession and use of the
309
+ User Product is transferred to the recipient in perpetuity or for a
310
+ fixed term (regardless of how the transaction is characterized), the
311
+ Corresponding Source conveyed under this section must be accompanied
312
+ by the Installation Information. But this requirement does not apply
313
+ if neither you nor any third party retains the ability to install
314
+ modified object code on the User Product (for example, the work has
315
+ been installed in ROM).
316
+
317
+ The requirement to provide Installation Information does not include a
318
+ requirement to continue to provide support service, warranty, or updates
319
+ for a work that has been modified or installed by the recipient, or for
320
+ the User Product in which it has been modified or installed. Access to a
321
+ network may be denied when the modification itself materially and
322
+ adversely affects the operation of the network or violates the rules and
323
+ protocols for communication across the network.
324
+
325
+ Corresponding Source conveyed, and Installation Information provided,
326
+ in accord with this section must be in a format that is publicly
327
+ documented (and with an implementation available to the public in
328
+ source code form), and must require no special password or key for
329
+ unpacking, reading or copying.
330
+
331
+ 7. Additional Terms.
332
+
333
+ "Additional permissions" are terms that supplement the terms of this
334
+ License by making exceptions from one or more of its conditions.
335
+ Additional permissions that are applicable to the entire Program shall
336
+ be treated as though they were included in this License, to the extent
337
+ that they are valid under applicable law. If additional permissions
338
+ apply only to part of the Program, that part may be used separately
339
+ under those permissions, but the entire Program remains governed by
340
+ this License without regard to the additional permissions.
341
+
342
+ When you convey a copy of a covered work, you may at your option
343
+ remove any additional permissions from that copy, or from any part of
344
+ it. (Additional permissions may be written to require their own
345
+ removal in certain cases when you modify the work.) You may place
346
+ additional permissions on material, added by you to a covered work,
347
+ for which you have or can give appropriate copyright permission.
348
+
349
+ Notwithstanding any other provision of this License, for material you
350
+ add to a covered work, you may (if authorized by the copyright holders of
351
+ that material) supplement the terms of this License with terms:
352
+
353
+ a) Disclaiming warranty or limiting liability differently from the
354
+ terms of sections 15 and 16 of this License; or
355
+
356
+ b) Requiring preservation of specified reasonable legal notices or
357
+ author attributions in that material or in the Appropriate Legal
358
+ Notices displayed by works containing it; or
359
+
360
+ c) Prohibiting misrepresentation of the origin of that material, or
361
+ requiring that modified versions of such material be marked in
362
+ reasonable ways as different from the original version; or
363
+
364
+ d) Limiting the use for publicity purposes of names of licensors or
365
+ authors of the material; or
366
+
367
+ e) Declining to grant rights under trademark law for use of some
368
+ trade names, trademarks, or service marks; or
369
+
370
+ f) Requiring indemnification of licensors and authors of that
371
+ material by anyone who conveys the material (or modified versions of
372
+ it) with contractual assumptions of liability to the recipient, for
373
+ any liability that these contractual assumptions directly impose on
374
+ those licensors and authors.
375
+
376
+ All other non-permissive additional terms are considered "further
377
+ restrictions" within the meaning of section 10. If the Program as you
378
+ received it, or any part of it, contains a notice stating that it is
379
+ governed by this License along with a term that is a further
380
+ restriction, you may remove that term. If a license document contains
381
+ a further restriction but permits relicensing or conveying under this
382
+ License, you may add to a covered work material governed by the terms
383
+ of that license document, provided that the further restriction does
384
+ not survive such relicensing or conveying.
385
+
386
+ If you add terms to a covered work in accord with this section, you
387
+ must place, in the relevant source files, a statement of the
388
+ additional terms that apply to those files, or a notice indicating
389
+ where to find the applicable terms.
390
+
391
+ Additional terms, permissive or non-permissive, may be stated in the
392
+ form of a separately written license, or stated as exceptions;
393
+ the above requirements apply either way.
394
+
395
+ 8. Termination.
396
+
397
+ You may not propagate or modify a covered work except as expressly
398
+ provided under this License. Any attempt otherwise to propagate or
399
+ modify it is void, and will automatically terminate your rights under
400
+ this License (including any patent licenses granted under the third
401
+ paragraph of section 11).
402
+
403
+ However, if you cease all violation of this License, then your
404
+ license from a particular copyright holder is reinstated (a)
405
+ provisionally, unless and until the copyright holder explicitly and
406
+ finally terminates your license, and (b) permanently, if the copyright
407
+ holder fails to notify you of the violation by some reasonable means
408
+ prior to 60 days after the cessation.
409
+
410
+ Moreover, your license from a particular copyright holder is
411
+ reinstated permanently if the copyright holder notifies you of the
412
+ violation by some reasonable means, this is the first time you have
413
+ received notice of violation of this License (for any work) from that
414
+ copyright holder, and you cure the violation prior to 30 days after
415
+ your receipt of the notice.
416
+
417
+ Termination of your rights under this section does not terminate the
418
+ licenses of parties who have received copies or rights from you under
419
+ this License. If your rights have been terminated and not permanently
420
+ reinstated, you do not qualify to receive new licenses for the same
421
+ material under section 10.
422
+
423
+ 9. Acceptance Not Required for Having Copies.
424
+
425
+ You are not required to accept this License in order to receive or
426
+ run a copy of the Program. Ancillary propagation of a covered work
427
+ occurring solely as a consequence of using peer-to-peer transmission
428
+ to receive a copy likewise does not require acceptance. However,
429
+ nothing other than this License grants you permission to propagate or
430
+ modify any covered work. These actions infringe copyright if you do
431
+ not accept this License. Therefore, by modifying or propagating a
432
+ covered work, you indicate your acceptance of this License to do so.
433
+
434
+ 10. Automatic Licensing of Downstream Recipients.
435
+
436
+ Each time you convey a covered work, the recipient automatically
437
+ receives a license from the original licensors, to run, modify and
438
+ propagate that work, subject to this License. You are not responsible
439
+ for enforcing compliance by third parties with this License.
440
+
441
+ An "entity transaction" is a transaction transferring control of an
442
+ organization, or substantially all assets of one, or subdividing an
443
+ organization, or merging organizations. If propagation of a covered
444
+ work results from an entity transaction, each party to that
445
+ transaction who receives a copy of the work also receives whatever
446
+ licenses to the work the party's predecessor in interest had or could
447
+ give under the previous paragraph, plus a right to possession of the
448
+ Corresponding Source of the work from the predecessor in interest, if
449
+ the predecessor has it or can get it with reasonable efforts.
450
+
451
+ You may not impose any further restrictions on the exercise of the
452
+ rights granted or affirmed under this License. For example, you may
453
+ not impose a license fee, royalty, or other charge for exercise of
454
+ rights granted under this License, and you may not initiate litigation
455
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
456
+ any patent claim is infringed by making, using, selling, offering for
457
+ sale, or importing the Program or any portion of it.
458
+
459
+ 11. Patents.
460
+
461
+ A "contributor" is a copyright holder who authorizes use under this
462
+ License of the Program or a work on which the Program is based. The
463
+ work thus licensed is called the contributor's "contributor version".
464
+
465
+ A contributor's "essential patent claims" are all patent claims
466
+ owned or controlled by the contributor, whether already acquired or
467
+ hereafter acquired, that would be infringed by some manner, permitted
468
+ by this License, of making, using, or selling its contributor version,
469
+ but do not include claims that would be infringed only as a
470
+ consequence of further modification of the contributor version. For
471
+ purposes of this definition, "control" includes the right to grant
472
+ patent sublicenses in a manner consistent with the requirements of
473
+ this License.
474
+
475
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
476
+ patent license under the contributor's essential patent claims, to
477
+ make, use, sell, offer for sale, import and otherwise run, modify and
478
+ propagate the contents of its contributor version.
479
+
480
+ In the following three paragraphs, a "patent license" is any express
481
+ agreement or commitment, however denominated, not to enforce a patent
482
+ (such as an express permission to practice a patent or covenant not to
483
+ sue for patent infringement). To "grant" such a patent license to a
484
+ party means to make such an agreement or commitment not to enforce a
485
+ patent against the party.
486
+
487
+ If you convey a covered work, knowingly relying on a patent license,
488
+ and the Corresponding Source of the work is not available for anyone
489
+ to copy, free of charge and under the terms of this License, through a
490
+ publicly available network server or other readily accessible means,
491
+ then you must either (1) cause the Corresponding Source to be so
492
+ available, or (2) arrange to deprive yourself of the benefit of the
493
+ patent license for this particular work, or (3) arrange, in a manner
494
+ consistent with the requirements of this License, to extend the patent
495
+ license to downstream recipients. "Knowingly relying" means you have
496
+ actual knowledge that, but for the patent license, your conveying the
497
+ covered work in a country, or your recipient's use of the covered work
498
+ in a country, would infringe one or more identifiable patents in that
499
+ country that you have reason to believe are valid.
500
+
501
+ If, pursuant to or in connection with a single transaction or
502
+ arrangement, you convey, or propagate by procuring conveyance of, a
503
+ covered work, and grant a patent license to some of the parties
504
+ receiving the covered work authorizing them to use, propagate, modify
505
+ or convey a specific copy of the covered work, then the patent license
506
+ you grant is automatically extended to all recipients of the covered
507
+ work and works based on it.
508
+
509
+ A patent license is "discriminatory" if it does not include within
510
+ the scope of its coverage, prohibits the exercise of, or is
511
+ conditioned on the non-exercise of one or more of the rights that are
512
+ specifically granted under this License. You may not convey a covered
513
+ work if you are a party to an arrangement with a third party that is
514
+ in the business of distributing software, under which you make payment
515
+ to the third party based on the extent of your activity of conveying
516
+ the work, and under which the third party grants, to any of the
517
+ parties who would receive the covered work from you, a discriminatory
518
+ patent license (a) in connection with copies of the covered work
519
+ conveyed by you (or copies made from those copies), or (b) primarily
520
+ for and in connection with specific products or compilations that
521
+ contain the covered work, unless you entered into that arrangement,
522
+ or that patent license was granted, prior to 28 March 2007.
523
+
524
+ Nothing in this License shall be construed as excluding or limiting
525
+ any implied license or other defenses to infringement that may
526
+ otherwise be available to you under applicable patent law.
527
+
528
+ 12. No Surrender of Others' Freedom.
529
+
530
+ If conditions are imposed on you (whether by court order, agreement or
531
+ otherwise) that contradict the conditions of this License, they do not
532
+ excuse you from the conditions of this License. If you cannot convey a
533
+ covered work so as to satisfy simultaneously your obligations under this
534
+ License and any other pertinent obligations, then as a consequence you may
535
+ not convey it at all. For example, if you agree to terms that obligate you
536
+ to collect a royalty for further conveying from those to whom you convey
537
+ the Program, the only way you could satisfy both those terms and this
538
+ License would be to refrain entirely from conveying the Program.
539
+
540
+ 13. Remote Network Interaction; Use with the GNU General Public License.
541
+
542
+ Notwithstanding any other provision of this License, if you modify the
543
+ Program, your modified version must prominently offer all users
544
+ interacting with it remotely through a computer network (if your version
545
+ supports such interaction) an opportunity to receive the Corresponding
546
+ Source of your version by providing access to the Corresponding Source
547
+ from a network server at no charge, through some standard or customary
548
+ means of facilitating copying of software. This Corresponding Source
549
+ shall include the Corresponding Source for any work covered by version 3
550
+ of the GNU General Public License that is incorporated pursuant to the
551
+ following paragraph.
552
+
553
+ Notwithstanding any other provision of this License, you have
554
+ permission to link or combine any covered work with a work licensed
555
+ under version 3 of the GNU General Public License into a single
556
+ combined work, and to convey the resulting work. The terms of this
557
+ License will continue to apply to the part which is the covered work,
558
+ but the work with which it is combined will remain governed by version
559
+ 3 of the GNU General Public License.
560
+
561
+ 14. Revised Versions of this License.
562
+
563
+ The Free Software Foundation may publish revised and/or new versions of
564
+ the GNU Affero General Public License from time to time. Such new versions
565
+ will be similar in spirit to the present version, but may differ in detail to
566
+ address new problems or concerns.
567
+
568
+ Each version is given a distinguishing version number. If the
569
+ Program specifies that a certain numbered version of the GNU Affero General
570
+ Public License "or any later version" applies to it, you have the
571
+ option of following the terms and conditions either of that numbered
572
+ version or of any later version published by the Free Software
573
+ Foundation. If the Program does not specify a version number of the
574
+ GNU Affero General Public License, you may choose any version ever published
575
+ by the Free Software Foundation.
576
+
577
+ If the Program specifies that a proxy can decide which future
578
+ versions of the GNU Affero General Public License can be used, that proxy's
579
+ public statement of acceptance of a version permanently authorizes you
580
+ to choose that version for the Program.
581
+
582
+ Later license versions may give you additional or different
583
+ permissions. However, no additional obligations are imposed on any
584
+ author or copyright holder as a result of your choosing to follow a
585
+ later version.
586
+
587
+ 15. Disclaimer of Warranty.
588
+
589
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597
+
598
+ 16. Limitation of Liability.
599
+
600
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608
+ SUCH DAMAGES.
609
+
610
+ 17. Interpretation of Sections 15 and 16.
611
+
612
+ If the disclaimer of warranty and limitation of liability provided
613
+ above cannot be given local legal effect according to their terms,
614
+ reviewing courts shall apply local law that most closely approximates
615
+ an absolute waiver of all civil liability in connection with the
616
+ Program, unless a warranty or assumption of liability accompanies a
617
+ copy of the Program in return for a fee.
618
+
619
+ END OF TERMS AND CONDITIONS
620
+
621
+ How to Apply These Terms to Your New Programs
622
+
623
+ If you develop a new program, and you want it to be of the greatest
624
+ possible use to the public, the best way to achieve this is to make it
625
+ free software which everyone can redistribute and change under these terms.
626
+
627
+ To do so, attach the following notices to the program. It is safest
628
+ to attach them to the start of each source file to most effectively
629
+ state the exclusion of warranty; and each file should have at least
630
+ the "copyright" line and a pointer to where the full notice is found.
631
+
632
+ <one line to give the program's name and a brief idea of what it does.>
633
+ Copyright (C) <year> <name of author>
634
+
635
+ This program is free software: you can redistribute it and/or modify
636
+ it under the terms of the GNU Affero General Public License as published
637
+ by the Free Software Foundation, either version 3 of the License, or
638
+ (at your option) any later version.
639
+
640
+ This program is distributed in the hope that it will be useful,
641
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
642
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643
+ GNU Affero General Public License for more details.
644
+
645
+ You should have received a copy of the GNU Affero General Public License
646
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
647
+
648
+ Also add information on how to contact you by electronic and paper mail.
649
+
650
+ If your software can interact with users remotely through a computer
651
+ network, you should also make sure that it provides a way for users to
652
+ get its source. For example, if your program is a web application, its
653
+ interface could display a "Source" link that leads users to an archive
654
+ of the code. There are many ways you could offer source, and different
655
+ solutions will be better for different programs; see section 13 for the
656
+ specific requirements.
657
+
658
+ You should also get your employer (if you work as a programmer) or school,
659
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
660
+ For more information on this, and how to apply and follow the GNU AGPL, see
661
+ <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,10 +1,9 @@
 ---
-title: Docker Faceswap Sample
-emoji: 🏢
-colorFrom: green
-colorTo: purple
-sdk: docker
-pinned: false
+title: face-swap
+app_file: run.py
+sdk: gradio
+sdk_version: 3.40.1
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# roop-unleashed
+
+WIP Version of roop-unleashed using Gradio UI
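The updated Space metadata points at run.py with the Gradio SDK (version 3.40.1). run.py itself is not shown on this page, so the following is only an illustrative sketch of a Gradio entry point of that shape; the function names and UI wiring are assumptions, not the project's actual code.

```python
# Hypothetical sketch of a Gradio entry point matching the Space metadata above
# (app_file: run.py, sdk: gradio). The real run.py in this commit is not shown here.
import gradio as gr


def swap_faces(source_img, target_img):
    # Placeholder: the real project would call its face-swap pipeline here.
    return target_img


with gr.Blocks(title="roop-unleashed") as demo:
    source = gr.Image(label="Source face")
    target = gr.Image(label="Target image")
    result = gr.Image(label="Result")
    gr.Button("Swap").click(swap_faces, inputs=[source, target], outputs=result)

if __name__ == "__main__":
    demo.launch()
```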
installer/installer.py ADDED
@@ -0,0 +1,83 @@
+import argparse
+import glob
+import os
+import shutil
+import site
+import subprocess
+import sys
+
+
+script_dir = os.getcwd()
+
+
+def run_cmd(cmd, capture_output=False, env=None):
+    # Run shell commands
+    return subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)
+
+
+def check_env():
+    # If we have access to conda, we are probably in an environment
+    conda_not_exist = run_cmd("conda", capture_output=True).returncode
+    if conda_not_exist:
+        print("Conda is not installed. Exiting...")
+        sys.exit()
+
+    # Ensure this is a new environment and not the base environment
+    if os.environ["CONDA_DEFAULT_ENV"] == "base":
+        print("Create an environment for this project and activate it. Exiting...")
+        sys.exit()
+
+
+def install_dependencies():
+    # Install Git and clone repo
+    run_cmd("conda install -y -k git")
+    run_cmd("git clone https://github.com/C0untFloyd/roop-unleashed.git")
+
+    # Install the webui dependencies
+    update_dependencies()
+
+
+def update_dependencies():
+    global MY_PATH
+
+    os.chdir(MY_PATH)
+    # do a hard reset to update even if there are local changes
+    run_cmd("git fetch --all")
+    run_cmd("git reset --hard origin/main")
+    run_cmd("git pull")
+    # Installs/Updates dependencies from all requirements.txt
+    run_cmd("python -m pip install -r requirements.txt")
+
+
+def start_app():
+    global MY_PATH
+
+    os.chdir(MY_PATH)
+    # forward commandline arguments
+    sys.argv.pop(0)
+    args = ' '.join(sys.argv)
+    print("Launching App")
+    run_cmd(f'python run.py {args}')
+
+
+if __name__ == "__main__":
+    global MY_PATH
+
+    MY_PATH = "roop-unleashed"
+
+
+    # Verifies we are in a conda environment
+    check_env()
+
+    # If webui has already been installed, skip and run
+    if not os.path.exists(MY_PATH):
+        install_dependencies()
+    else:
+        # moved update from batch to here, because of batch limitations
+        updatechoice = input("Check for Updates? [y/n]").lower()
+        if updatechoice == "y":
+            update_dependencies()
+
+    # Run the model with webui
+    os.chdir(script_dir)
+    start_app()
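For context on the argument handling in start_app() above: the script drops its own name from sys.argv and forwards the remaining flags verbatim to run.py. A small standalone illustration of that step follows; the flag values are just examples borrowed from windows_run.bat further down.

```python
# Standalone illustration of the argument forwarding done by start_app().
import sys

# Simulate: python installer.py --execution-provider cuda
sys.argv = ["installer.py", "--execution-provider", "cuda"]

sys.argv.pop(0)                   # drop the installer's own name, as start_app() does
args = " ".join(sys.argv)         # rebuild the remaining arguments as one string
print(f"python run.py {args}")    # -> python run.py --execution-provider cuda
```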
installer/windows_run.bat ADDED
@@ -0,0 +1,80 @@
+@echo off
+REM Please set the following commandline arguments to your preferred settings
+set COMMANDLINE_ARGS=--execution-provider cuda --frame-processor face_swapper face_enhancer --video-encoder libvpx-vp9
+
+cd /D "%~dp0"
+
+echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end
+
+set PATH=%PATH%;%SystemRoot%\system32
+
+@rem config
+set INSTALL_DIR=%cd%\installer_files
+set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
+set INSTALL_ENV_DIR=%cd%\installer_files\env
+set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe
+set FFMPEG_DOWNLOAD_URL=https://github.com/GyanD/codexffmpeg/releases/download/2023-06-21-git-1bcb8a7338/ffmpeg-2023-06-21-git-1bcb8a7338-essentials_build.zip
+set INSTALL_FFMPEG_DIR=%cd%\installer_files\ffmpeg
+set conda_exists=F
+
+@rem figure out whether git and conda needs to be installed
+call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
+if "%ERRORLEVEL%" EQU "0" set conda_exists=T
+
+@rem (if necessary) install git and conda into a contained environment
+@rem download conda
+if "%conda_exists%" == "F" (
+    echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
+
+    mkdir "%INSTALL_DIR%"
+    call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda failed to download. && goto end )
+
+    echo Installing Miniconda to %CONDA_ROOT_PREFIX%
+    start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
+
+    @rem test the conda binary
+    echo Miniconda version:
+    call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
+)
+
+@rem create the installer env
+if not exist "%INSTALL_ENV_DIR%" (
+    echo Packages to install: %PACKAGES_TO_INSTALL%
+    call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 || ( echo. && echo Conda environment creation failed. && goto end )
+)
+
+if not exist "%INSTALL_FFMPEG_DIR%" (
+    echo Downloading ffmpeg from %FFMPEG_DOWNLOAD_URL% to %INSTALL_DIR%
+    call curl -Lk "%FFMPEG_DOWNLOAD_URL%" > "%INSTALL_DIR%\ffmpeg.zip" || ( echo. && echo ffmpeg failed to download. && goto end )
+    call powershell -command "Expand-Archive -Force '%INSTALL_DIR%\ffmpeg.zip' '%INSTALL_DIR%\'"
+
+    cd "installer_files"
+    setlocal EnableExtensions EnableDelayedExpansion
+
+    for /f "tokens=*" %%f in ('dir /s /b /ad "ffmpeg*"') do (
+        ren "%%f" "ffmpeg"
+    )
+    endlocal
+    setx PATH "%INSTALL_FFMPEG_DIR%\bin\;%PATH%"
+    echo To use videos, you need to restart roop after this installation.
+    cd ..
+)
+
+@rem check if conda environment was actually created
+if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo ERROR: Conda environment is empty. && goto end )
+
+@rem activate installer env
+call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
+
+@rem setup installer env
+echo Launching roop unleashed - please edit windows_run.bat to customize commandline arguments
+call python installer.py %COMMANDLINE_ARGS%
+
+echo.
+echo Done!
+
+:end
+pause
+
+
+
jaa.py ADDED
@@ -0,0 +1,355 @@
+"""
+Jaa.py Plugin Framework
+Author: Janvarev Vladislav
+
+Jaa.py - a minimalistic one-file plugin framework with no dependencies.
+Main functions:
+- runs all plugin files from the "plugins" folder, based on filename
+- saves each plugin's options in the "options" folder as JSON text files for further editing
+
+- Plugins
+  must be located in the plugins/ folder
+  must have a "start(core)" function that returns a manifest dict
+  the manifest must contain the keys "name" and "version"
+  it can contain "default_options"
+    - if present, options will be saved in the "options" folder and reloaded from there next time
+    - if present, the "start_with_options(core,manifest)" function will be run with a manifest that has an "options" key
+  the manifest will be processed in the "process_plugin_manifest" function if you override it
+
+- Options (for plugins)
+  are saved under the "options" folder in JSON format
+  created on the first run of a plugin with "default_options"
+  updated when the plugin changes "version"
+
+- Example usage:
+  class VoiceAssCore(JaaCore): # class must inherit from JaaCore
+    def __init__(self):
+      JaaCore.__init__(self,__file__)
+  ...
+
+  main = VoiceAssCore()
+  main.init_plugins(["core"]) # 1st param - plugins to be initialized first
+                              # useful if you need some "core" options/plugin to be loaded before others
+                              # these names need not start with the "plugin_" prefix
+
+  it can also be run like
+
+  main.init_plugins()
+
+- Requirements
+  Python 3.5+ (due to the dict merge in the final_options calc), can be relaxed
+"""
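The docstring above spells out the plugin contract. As an illustration only (this file is hypothetical and not part of the commit), a minimal plugins/plugin_example.py satisfying that contract could look like:

```python
# Hypothetical plugins/plugin_example.py - an illustration of the contract described
# in the docstring above; it is not part of this commit.
def start(core):
    # must return a manifest dict with at least "name" and "version"
    return {
        "name": "Example plugin",
        "version": "1.0",
        "default_options": {
            "greeting": "hello",  # persisted to plugin_options/plugin_example.json
        },
    }


def start_with_options(core, manifest):
    # called after default options are merged with any saved JSON options
    print("greeting option is:", manifest["options"]["greeting"])
```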
42
+
43
+ import os
44
+ import traceback
45
+ import json
46
+
47
+ # here we trying to use termcolor to highlight plugin info and errors during load
48
+ try:
49
+ from termcolor import cprint
50
+ except Exception as e:
51
+ # not found? making a stub!
52
+ def cprint(p,color=None):
53
+ if color == None:
54
+ print(p)
55
+ else:
56
+ print(str(color).upper(),p)
57
+
58
+ version = "2.2.0"
59
+
60
+ class JaaCore:
61
+ verbose = False
62
+
63
+ def __init__(self,root_file = __file__):
64
+ self.jaaPluginPrefix = "plugin_"
65
+ self.jaaVersion = version
66
+ self.jaaRootFolder = os.path.dirname(root_file)
67
+ self.jaaOptionsPath = self.jaaRootFolder+os.path.sep+"plugin_options"
68
+ self.jaaShowTracebackOnPluginErrors = False
69
+ if self.verbose:
70
+ cprint("JAA.PY v{0} class created!".format(version),"blue")
71
+
72
+ # ------------- plugins -----------------
73
+ def init_plugins(self, list_first_plugins = []):
74
+ self.plugin_manifests = {}
75
+
76
+ # 1. run first plugins first!
77
+ for modname in list_first_plugins:
78
+ self.init_plugin(modname)
79
+
80
+ # 2. run all plugins from plugins folder
81
+ from os import listdir
82
+ from os.path import isfile, join
83
+ pluginpath = self.jaaRootFolder+"/plugins"
84
+ files = [f for f in listdir(pluginpath) if isfile(join(pluginpath, f))]
85
+
86
+ for fil in files:
87
+ # print fil[:-3]
88
+ if fil.startswith(self.jaaPluginPrefix) and fil.endswith(".py"):
89
+ modfile = fil[:-3]
90
+ self.init_plugin(modfile)
91
+
92
+
93
+
94
+ def init_plugin(self,modname):
95
+ # import
96
+ try:
97
+ mod = self.import_plugin("plugins."+modname)
98
+ except Exception as e:
99
+ self.print_error("JAA PLUGIN ERROR: {0} error on load: {1}".format(modname, str(e)))
100
+ return False
101
+
102
+ # run start function
103
+ try:
104
+ res = mod.start(self)
105
+ except Exception as e:
106
+ self.print_error("JAA PLUGIN ERROR: {0} error on start: {1}".format(modname, str(e)))
107
+ return False
108
+
109
+ # if plugin has an options
110
+ if "default_options" in res:
111
+ try:
112
+ # saved options try to read
113
+ saved_options = {}
114
+ try:
115
+ with open(self.jaaOptionsPath+'/'+modname+'.json', 'r', encoding="utf-8") as f:
116
+ s = f.read()
117
+ saved_options = json.loads(s)
118
+ #print("Saved options", saved_options)
119
+ except Exception as e:
120
+ pass
121
+
122
+ res["default_options"]["v"] = res["version"]
123
+
124
+
125
+ # only string needs Python 3.5
126
+ final_options = {**res["default_options"], **saved_options}
127
+
128
+ # if no option found or version is differ from mod version
129
+ if len(saved_options) == 0 or saved_options["v"] != res["version"]:
130
+ final_options["v"] = res["version"]
131
+ self.save_plugin_options(modname,final_options)
132
+
133
+ res["options"] = final_options
134
+
135
+ try:
136
+ res2 = mod.start_with_options(self,res)
137
+ if res2 != None:
138
+ res = res2
139
+ except Exception as e:
140
+ self.print_error("JAA PLUGIN ERROR: {0} error on start_with_options processing: {1}".format(modname, str(e)))
141
+ return False
142
+
143
+ except Exception as e:
144
+ self.print_error("JAA PLUGIN ERROR: {0} error on options processing: {1}".format(modname, str(e)))
145
+ return False
146
+
147
+
148
+ # processing plugin manifest
149
+ try:
150
+ # set up name and version
151
+ plugin_name = res["name"]
152
+ plugin_version = res["version"]
153
+
154
+
155
+ self.process_plugin_manifest(modname,res)
156
+
157
+ except Exception as e:
158
+ print("JAA PLUGIN ERROR: {0} error on process startup options: {1}".format(modname, str(e)))
159
+ return False
160
+
161
+ self.plugin_manifests[modname] = res
162
+
163
+ self.on_succ_plugin_start(modname,plugin_name,plugin_version)
164
+ return True
165
+
166
+ def on_succ_plugin_start(self, modname, plugin_name, plugin_version):
167
+ if self.verbose:
168
+ cprint("JAA PLUGIN: {1} {2} ({0}) started!".format(modname, plugin_name, plugin_version))
169
+
170
+ def print_error(self,p):
171
+ cprint(p,"red")
172
+ if self.jaaShowTracebackOnPluginErrors:
173
+ traceback.print_exc()
174
+
175
+ def import_plugin(self, module_name):
176
+ import sys
177
+
178
+ __import__(module_name)
179
+
180
+ if module_name in sys.modules:
181
+ return sys.modules[module_name]
182
+ return None
183
+
184
+ def save_plugin_options(self,modname,options):
185
+ # check folder exists
186
+ if not os.path.exists(self.jaaOptionsPath):
187
+ os.makedirs(self.jaaOptionsPath)
188
+
189
+ str_options = json.dumps(options, sort_keys=True, indent=4, ensure_ascii=False)
190
+ with open(self.jaaOptionsPath+'/'+modname+'.json', 'w', encoding="utf-8") as f:
191
+ f.write(str_options)
192
+ f.close()
193
+
194
+ # process manifest must be overrided in inherit class
195
+ def process_plugin_manifest(self,modname,manifest):
196
+ print("JAA PLUGIN: {0} manifest dummy procession (override 'process_plugin_manifest' function)".format(modname))
197
+ return
198
+
199
+ def plugin_manifest(self,pluginname):
200
+ if pluginname in self.plugin_manifests:
201
+ return self.plugin_manifests[pluginname]
202
+ return {}
203
+
204
+ def plugin_options(self,pluginname):
205
+ manifest = self.plugin_manifest(pluginname)
206
+ if "options" in manifest:
207
+ return manifest["options"]
208
+ return None
209
+
210
+ # ------------ gradio stuff --------------
211
+ def gradio_save(self,pluginname):
212
+ print("Saving options for {0}!".format(pluginname))
213
+ self.save_plugin_options(pluginname,self.plugin_options(pluginname))
214
+
215
+ def gradio_upd(self, pluginname, option, val):
216
+ options = self.plugin_options(pluginname)
217
+
218
+ # special case
219
+ if isinstance(options[option], (list, dict)) and isinstance(val, str):
220
+ import json
221
+ try:
222
+ options[option] = json.loads(val)
223
+ except Exception as e:
224
+ print(e)
225
+ pass
226
+ else:
227
+ options[option] = val
228
+ print(option,val,options)
229
+
230
+ def gradio_render_settings_interface(self, title:str="Settings manager", required_fields_to_show_plugin:list=["default_options"]):
231
+ import gradio as gr
232
+
233
+ with gr.Blocks() as gr_interface:
234
+ gr.Markdown("# {0}".format(title))
235
+ for pluginname in self.plugin_manifests:
236
+ manifest = self.plugin_manifests[pluginname]
237
+
238
+ # calculate if we show plugin
239
+ is_show_plugin = False
240
+ if len(required_fields_to_show_plugin) == 0:
241
+ is_show_plugin = True
242
+ else:
243
+ for k in required_fields_to_show_plugin:
244
+ if manifest.get(k) is not None:
245
+ is_show_plugin = True
246
+
247
+ if is_show_plugin:
248
+ with gr.Tab(pluginname):
249
+ gr.Markdown("## {0} v{1}".format(manifest["name"],manifest["version"]))
250
+ if manifest.get("description") is not None:
251
+ gr.Markdown(manifest.get("description"))
252
+
253
+ if manifest.get("url") is not None:
254
+ gr.Markdown("**URL:** [{0}]({0})".format(manifest.get("url")))
255
+
256
+
257
+ if "options" in manifest:
258
+ options = manifest["options"]
259
+ if len(options) > 1: # more than just the internal "v" version field
260
+ text_button = gr.Button("Save options".format(pluginname))
261
+ #options_int_list = []
262
+ for option in options:
263
+
264
+ #gr.Label(label=option)
265
+ if option != "v":
266
+ val = options[option]
267
+ label = option
268
+
269
+ if manifest.get("options_label") is not None:
270
+ if manifest.get("options_label").get(option) is not None:
271
+ label = option+": "+manifest.get("options_label").get(option)
272
+
273
+
274
+ if isinstance(val, (bool, )):
275
+ gr_elem = gr.Checkbox(value=val,label=label)
276
+ elif isinstance(val, (dict,list)):
277
+ import json
278
+ gr_elem = gr.Textbox(value=json.dumps(val,ensure_ascii=False), label=label)
279
+ else:
280
+ gr_elem = gr.Textbox(value=val, label=label)
281
+
282
+ def handler(x,pluginname=pluginname,option=option):
283
+ self.gradio_upd(pluginname, option, x)
284
+
285
+ gr_elem.change(handler, gr_elem, None)
286
+
287
+ def handler_save(pluginname=pluginname):
288
+ self.gradio_save(pluginname)
289
+
290
+ text_button.click(handler_save,inputs=None,outputs=None)
291
+ else:
292
+ gr.Markdown("_No options for this plugin_")
293
+
294
+ return gr_interface
295
+
296
+
297
+ def load_options(options_file=None,py_file=None,default_options={}):
298
+ # 1. calculating options filename
299
+ if options_file == None:
300
+ if py_file == None:
301
+ raise Exception('JAA: Options or PY file is not defined, cannot calculate options filename')
302
+ else:
303
+ options_file = py_file[:-3]+'.json'
304
+
305
+ # 2. try to read saved options
306
+ saved_options = {}
307
+ try:
308
+ with open(options_file, 'r', encoding="utf-8") as f:
309
+ s = f.read()
310
+ saved_options = json.loads(s)
311
+ #print("Saved options", saved_options)
312
+ except Exception as e:
313
+ pass
314
+
315
+ # 3. calculating final options
316
+
317
+ # dict-merge syntax below requires Python 3.5+
318
+ final_options = {**default_options, **saved_options}
319
+
320
+ # 4. calculate a hash of the default options to check whether the file needs rewriting
321
+ import hashlib
322
+ hash = hashlib.md5((json.dumps(default_options, sort_keys=True)).encode('utf-8')).hexdigest()
323
+
324
+ # 5. if no options file was found, or the stored hash came from different default options
325
+ if len(saved_options) == 0 or not ("hash" in saved_options.keys()) or saved_options["hash"] != hash:
326
+ final_options["hash"] = hash
327
+ #self.save_plugin_options(modname,final_options)
328
+
329
+ # saving in file
330
+ str_options = json.dumps(final_options, sort_keys=True, indent=4, ensure_ascii=False)
331
+ with open(options_file, 'w', encoding="utf-8") as f:
332
+ f.write(str_options)
333
+ f.close()
334
+
335
+ return final_options
336
+
337
+ """
338
+ The MIT License (MIT)
339
+ Copyright (c) 2021 Janvarev Vladislav
340
+
341
+ Permission is hereby granted, free of charge, to any person obtaining a copy
342
+ of this software and associated documentation files (the “Software”), to deal
343
+ in the Software without restriction, including without limitation the rights to use,
344
+ copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
345
+ and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
346
+
347
+ The above copyright notice and this permission notice shall be included in all copies or
348
+ substantial portions of the Software.
349
+
350
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
351
+ INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
352
+ PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
353
+ FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
354
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
355
+ """
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ from roop import core
4
+
5
+ if __name__ == '__main__':
6
+ core.run()
models/CLIP/rd64-uni-refined.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4956f9a7978a75630b08c9d6ec075b7c51cf43b4751b686e3a011d4012ddc9d
3
+ size 4720707
models/CodeFormer/codeformer.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1009e537e0c2a07d4cabce6355f53cb66767cd4b4297ec7a4a64ca4b8a5684b7
3
+ size 376637898
models/CodeFormer/facelib/detection_Resnet50_Final.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
3
+ size 109497761
models/CodeFormer/facelib/parsing_parsenet.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d558d8d0e42c20224f13cf5a29c79eba2d59913419f945545d8cf7b72920de2
3
+ size 85331193
models/CodeFormer/realesrgan/RealESRGAN_x2plus.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49fafd45f8fd7aa8d31ab2a22d14d91b536c34494a5cfe31eb5d89c2fa266abb
3
+ size 67061725
models/DMDNet.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70daeb4b1fd10f241043b587d892a941f2651d7322db02f06ff64b166537f65c
3
+ size 603684323
models/GFPGANv1.4.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2cd4703ab14f4d01fd1383a8a8b266f9a5833dacee8e6a79d3bf21a1b6be5ad
3
+ size 348632874
models/inswapper_128.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4a3f08c753cb72d04e10aa0f7dbe3deebbf39567d4ead6dce08e98aa49e16af
3
+ size 554253681
requirements-ci.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy==1.23.5
2
+ opencv-python==4.7.0.72
3
+ onnx==1.14.0
4
+ insightface==0.7.3
5
+ psutil==5.9.5
6
+ tk==0.1.0
7
+ customtkinter==5.2.0
8
+ torch==2.0.1
9
+ torchvision==0.15.2
10
+ onnxruntime==1.15.0
11
+ protobuf==4.23.2
12
+ tqdm==4.65.0
13
+ codeformer-pip==0.0.4
14
+ gfpgan==1.3.8
requirements.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu118
2
+
3
+ numpy==1.24.2
4
+ gradio==3.38.0
5
+ opencv-python==4.7.0.72
6
+ onnx==1.14.0
7
+ insightface==0.7.3
8
+ psutil==5.9.5
9
+ pillow==9.5.0
10
+ torch==2.0.1+cu118; sys_platform != 'darwin'
11
+ torch==2.0.1; sys_platform == 'darwin'
12
+ torchvision==0.15.2+cu118; sys_platform != 'darwin'
13
+ torchvision==0.15.2; sys_platform == 'darwin'
14
+ onnxruntime==1.15.0; sys_platform == 'darwin' and platform_machine != 'arm64'
15
+ onnxruntime-silicon==1.13.1; sys_platform == 'darwin' and platform_machine == 'arm64'
16
+ onnxruntime-gpu==1.15.0; sys_platform != 'darwin'
17
+ protobuf==4.23.2
18
+ tqdm==4.65.0
19
+ codeformer-pip==0.0.4
20
+ gfpgan==1.3.8
21
+ ftfy
22
+ regex
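Note: the platform-specific lines above rely on PEP 508 environment markers (`sys_platform`, `platform_machine`), so pip installs the CUDA wheels on Linux/Windows and the plain or Apple Silicon builds on macOS. A small sketch of how such a marker is evaluated, assuming the third-party `packaging` library is available:

```python
# Assumes the 'packaging' package (pip install packaging) is installed.
from packaging.markers import Marker

marker = Marker("sys_platform == 'darwin' and platform_machine == 'arm64'")

# evaluate() with no arguments checks the marker against the running interpreter
print(marker.evaluate())

# ...or against an explicit environment, e.g. an Apple Silicon Mac
print(marker.evaluate({"sys_platform": "darwin", "platform_machine": "arm64"}))  # True
```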
roop/__init__.py ADDED
File without changes
roop/capturer.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ import cv2
3
+
4
+ from roop.typing import Frame
5
+
6
+ def get_image_frame(filename: str):
7
+ try:
8
+ frame = cv2.imread(filename)
9
+ return frame
10
+ except Exception:
11
+ print(f"Exception reading {filename}")
12
+ return None
13
+
14
+
15
+
16
+
17
+ def get_video_frame(video_path: str, frame_number: int = 0) -> Optional[Frame]:
18
+ capture = cv2.VideoCapture(video_path)
19
+ frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
20
+ capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
21
+ has_frame, frame = capture.read()
22
+ capture.release()
23
+ if has_frame:
24
+ return frame
25
+ return None
26
+
27
+
28
+ def get_video_frame_total(video_path: str) -> int:
29
+ capture = cv2.VideoCapture(video_path)
30
+ video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
31
+ capture.release()
32
+ return video_frame_total
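Note: `get_video_frame` treats `frame_number` as 1-based (it seeks to `frame_number - 1`) and returns `None` when the read fails. A hypothetical usage sketch; the file path is a placeholder:

```python
# Hypothetical usage of the capturer helpers above ("example.mp4" is a placeholder).
from roop.capturer import get_video_frame, get_video_frame_total

total = get_video_frame_total("example.mp4")
middle = get_video_frame("example.mp4", total // 2)   # frame_number is 1-based
if middle is not None:
    print(middle.shape)  # (height, width, channels) BGR ndarray from OpenCV
```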
roop/core.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import os
4
+ import sys
5
+ import shutil
6
+ # single thread doubles cuda performance - needs to be set before torch import
7
+ if any(arg.startswith('--execution-provider') for arg in sys.argv):
8
+ os.environ['OMP_NUM_THREADS'] = '1'
9
+
10
+ import warnings
11
+ from typing import List
12
+ import platform
13
+ import signal
14
+ import argparse
15
+ import torch
16
+ import onnxruntime
17
+
18
+ import roop.globals
19
+ import roop.metadata
20
+ import roop.utilities as util
21
+ import roop.ui as ui
22
+ from settings import Settings
23
+ from roop.face_helper import extract_face_images
24
+ from chain_img_processor import ChainImgProcessor, ChainVideoProcessor, ChainBatchImageProcessor
25
+
26
+ clip_text = None
27
+
28
+
29
+ if 'ROCMExecutionProvider' in roop.globals.execution_providers:
30
+ del torch
31
+
32
+ warnings.filterwarnings('ignore', category=FutureWarning, module='insightface')
33
+ warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')
34
+
35
+
36
+ def parse_args() -> None:
37
+ signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
38
+ program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
39
+ program.add_argument('-s', '--source', help='select a source image', dest='source_path')
40
+ program.add_argument('-t', '--target', help='select a target image or video', dest='target_path')
41
+ program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
42
+ program.add_argument('-f', '--folder', help='select a target folder with images or videos to batch process', dest='target_folder_path')
43
+ program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
44
+ program.add_argument('--keep-fps', help='keep target fps', dest='keep_fps', action='store_true')
45
+ program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true')
46
+ program.add_argument('--skip-audio', help='skip target audio', dest='skip_audio', action='store_true')
47
+ program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true')
48
+ program.add_argument('--source-face_index', help='index position of source face in image', dest='source_face_index', type=int, default=0)
49
+ program.add_argument('--target-face_index', help='index position of target face in image', dest='target_face_index', type=int, default=0)
50
+ program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
51
+ program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
52
+ program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
53
+ program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
54
+ program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
55
+ program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
56
+
57
+ args = program.parse_args()
58
+
59
+ roop.globals.source_path = args.source_path
60
+ roop.globals.target_path = args.target_path
61
+ roop.globals.output_path = util.normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
62
+ roop.globals.target_folder_path = args.target_folder_path
63
+ roop.globals.headless = args.source_path or args.target_path or args.output_path
64
+ # Always enable all processors when using GUI
65
+ if not roop.globals.headless:
66
+ roop.globals.frame_processors = ['face_swapper', 'face_enhancer']
67
+ else:
68
+ roop.globals.frame_processors = args.frame_processor
69
+
70
+ roop.globals.keep_fps = args.keep_fps
71
+ roop.globals.keep_frames = args.keep_frames
72
+ roop.globals.skip_audio = args.skip_audio
73
+ roop.globals.many_faces = args.many_faces
74
+ roop.globals.source_face_index = args.source_face_index
75
+ roop.globals.target_face_index = args.target_face_index
76
+ roop.globals.video_encoder = args.video_encoder
77
+ roop.globals.video_quality = args.video_quality
78
+ roop.globals.max_memory = args.max_memory
79
+ roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
80
+ roop.globals.execution_threads = args.execution_threads
81
+
82
+
83
+ def encode_execution_providers(execution_providers: List[str]) -> List[str]:
84
+ return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
85
+
86
+
87
+ def decode_execution_providers(execution_providers: List[str]) -> List[str]:
88
+ return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
89
+ if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
90
+
91
+
92
+ def suggest_max_memory() -> int:
93
+ if platform.system().lower() == 'darwin':
94
+ return 4
95
+ return 16
96
+
97
+
98
+ def suggest_execution_providers() -> List[str]:
99
+ return encode_execution_providers(onnxruntime.get_available_providers())
100
+
101
+
102
+ def suggest_execution_threads() -> int:
103
+ if 'DmlExecutionProvider' in roop.globals.execution_providers:
104
+ return 1
105
+ if 'ROCMExecutionProvider' in roop.globals.execution_providers:
106
+ return 1
107
+ return 8
108
+
109
+
110
+ def limit_resources() -> None:
111
+ # prevent tensorflow memory leak
112
+ # gpus = tensorflow.config.experimental.list_physical_devices('GPU')
113
+ # for gpu in gpus:
114
+ # tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
115
+ # tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
116
+ # ])
117
+ # limit memory usage
118
+ if roop.globals.max_memory:
119
+ memory = roop.globals.max_memory * 1024 ** 3
120
+ if platform.system().lower() == 'darwin':
121
+ memory = roop.globals.max_memory * 1024 ** 6
122
+ if platform.system().lower() == 'windows':
123
+ import ctypes
124
+ kernel32 = ctypes.windll.kernel32
125
+ kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
126
+ else:
127
+ import resource
128
+ resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
129
+
130
+
131
+ def release_resources() -> None:
132
+ if 'CUDAExecutionProvider' in roop.globals.execution_providers:
133
+ torch.cuda.empty_cache()
134
+
135
+
136
+ def pre_check() -> bool:
137
+ if sys.version_info < (3, 9):
138
+ update_status('Python version is not supported - please upgrade to 3.9 or higher.')
139
+ return False
140
+
141
+ download_directory_path = util.resolve_relative_path('../models')
142
+ util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
143
+ util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/GFPGANv1.4.pth'])
144
+ util.conditional_download(download_directory_path, ['https://github.com/csxmli2016/DMDNet/releases/download/v1/DMDNet.pth'])
145
+ download_directory_path = util.resolve_relative_path('../models/CLIP')
146
+ util.conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/rd64-uni-refined.pth'])
147
+ download_directory_path = util.resolve_relative_path('../models/CodeFormer')
148
+ util.conditional_download(download_directory_path, ['https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'])
149
+ download_directory_path = util.resolve_relative_path('../models/CodeFormer/facelib')
150
+ util.conditional_download(download_directory_path, ['https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_Resnet50_Final.pth'])
151
+ util.conditional_download(download_directory_path, ['https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth'])
152
+ download_directory_path = util.resolve_relative_path('../models/CodeFormer/realesrgan')
153
+ util.conditional_download(download_directory_path, ['https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth'])
154
+
155
+ if not shutil.which('ffmpeg'):
156
+ update_status('ffmpeg is not installed.')
157
+ return True
158
+
159
+
160
+ def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
161
+ print(f'[{scope}] {message}')
162
+ # if not roop.globals.headless:
163
+ # ui.update_status(message)
164
+
165
+
166
+
167
+ def start() -> None:
168
+ if roop.globals.headless:
169
+ faces = extract_face_images(roop.globals.source_path, (False, 0))
170
+ roop.globals.SELECTED_FACE_DATA_INPUT = faces[roop.globals.source_face_index]
171
+ faces = extract_face_images(roop.globals.target_path, (False, util.has_image_extension(roop.globals.target_path)))
172
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = faces[roop.globals.target_face_index]
173
+ if 'face_enhancer' in roop.globals.frame_processors:
174
+ roop.globals.selected_enhancer = 'GFPGAN'
175
+
176
+ batch_process(None, False, None)
177
+
178
+
179
+ def InitPlugins():
180
+ if not roop.globals.IMAGE_CHAIN_PROCESSOR:
181
+ roop.globals.IMAGE_CHAIN_PROCESSOR = ChainImgProcessor()
182
+ roop.globals.BATCH_IMAGE_CHAIN_PROCESSOR = ChainBatchImageProcessor()
183
+ roop.globals.VIDEO_CHAIN_PROCESSOR = ChainVideoProcessor()
184
+ roop.globals.IMAGE_CHAIN_PROCESSOR.init_with_plugins()
185
+ roop.globals.BATCH_IMAGE_CHAIN_PROCESSOR.init_with_plugins()
186
+ roop.globals.VIDEO_CHAIN_PROCESSOR.init_with_plugins()
187
+
188
+
189
+ def get_processing_plugins(use_clip):
190
+ processors = "faceswap"
191
+ if use_clip:
192
+ processors += ",txt2clip"
193
+
194
+ if roop.globals.selected_enhancer == 'GFPGAN':
195
+ processors += ",gfpgan"
196
+ elif roop.globals.selected_enhancer == 'Codeformer':
197
+ processors += ",codeformer"
198
+ elif roop.globals.selected_enhancer == 'DMDNet':
199
+ processors += ",dmdnet"
200
+
201
+ return processors
202
+
203
+
204
+ def live_swap(frame, swap_mode, use_clip, clip_text):
205
+ if frame is None:
206
+ return frame
207
+
208
+ InitPlugins()
209
+ processors = get_processing_plugins(use_clip)
210
+
211
+
212
+ temp_frame, _ = roop.globals.IMAGE_CHAIN_PROCESSOR.run_chain(frame,
213
+ {"swap_mode": swap_mode,
214
+ "original_frame": frame,
215
+ "blend_ratio": roop.globals.blend_ratio,
216
+ "face_distance_threshold": roop.globals.distance_threshold,
217
+ "input_face_datas": [roop.globals.SELECTED_FACE_DATA_INPUT], "target_face_datas": [roop.globals.SELECTED_FACE_DATA_OUTPUT],
218
+ "clip_prompt": clip_text},
219
+ processors)
220
+ return temp_frame
221
+
222
+
223
+
224
+ def params_gen_func(proc, frame):
225
+ global clip_text
226
+
227
+ return {"original_frame": frame, "blend_ratio": roop.globals.blend_ratio,
228
+ "swap_mode": roop.globals.face_swap_mode, "face_distance_threshold": roop.globals.distance_threshold,
229
+ "input_face_datas": [roop.globals.SELECTED_FACE_DATA_INPUT], "target_face_datas": [roop.globals.SELECTED_FACE_DATA_OUTPUT],
230
+ "clip_prompt": clip_text}
231
+
232
+ def batch_process(files, use_clip, new_clip_text) -> None:
233
+ global clip_text
234
+
235
+ InitPlugins()
236
+ processors = get_processing_plugins(use_clip)
237
+
238
+ clip_text = new_clip_text
239
+
240
+ imagefiles = []
241
+ imagefinalnames = []
242
+ videofiles = []
243
+ videofinalnames = []
244
+ need_join = False
245
+
246
+ if files is None:
247
+ need_join = True
248
+ if roop.globals.target_folder_path is None:
249
+ roop.globals.target_folder_path = os.path.dirname(roop.globals.target_path)
250
+ files = [os.path.basename(roop.globals.target_path)]
251
+ roop.globals.output_path = os.path.dirname(roop.globals.output_path)
252
+ else:
253
+ files = [f for f in os.listdir(roop.globals.target_folder_path) if os.path.isfile(os.path.join(roop.globals.target_folder_path, f))]
254
+
255
+ update_status('Sorting videos/images')
256
+
257
+
258
+ for f in files:
259
+ if need_join:
260
+ fullname = os.path.join(roop.globals.target_folder_path, f)
261
+ else:
262
+ fullname = f
263
+ if util.has_image_extension(fullname):
264
+ imagefiles.append(fullname)
265
+ imagefinalnames.append(util.get_destfilename_from_path(fullname, roop.globals.output_path, f'_fake.{roop.globals.CFG.output_image_format}'))
266
+ elif util.is_video(fullname) or util.has_extension(fullname, ['gif']):
267
+ videofiles.append(fullname)
268
+ videofinalnames.append(util.get_destfilename_from_path(fullname, roop.globals.output_path, f'_fake.{roop.globals.CFG.output_video_format}'))
269
+
270
+
271
+ if(len(imagefiles) > 0):
272
+ update_status('Processing image(s)')
273
+ roop.globals.BATCH_IMAGE_CHAIN_PROCESSOR.run_batch_chain(imagefiles, imagefinalnames, roop.globals.execution_threads, processors, params_gen_func)
274
+ if(len(videofiles) > 0):
275
+ for index,v in enumerate(videofiles):
276
+ update_status(f'Processing video {v}')
277
+ fps = util.detect_fps(v)
278
+ if roop.globals.keep_frames:
279
+ update_status('Creating temp resources...')
280
+ util.create_temp(v)
281
+ update_status('Extracting frames...')
282
+ util.extract_frames(v)
283
+ temp_frame_paths = util.get_temp_frame_paths(v)
284
+ roop.globals.BATCH_IMAGE_CHAIN_PROCESSOR.run_batch_chain(temp_frame_paths, temp_frame_paths, roop.globals.execution_threads, processors, params_gen_func)
285
+ update_status(f'Creating video with {fps} FPS...')
286
+ util.create_video(v, videofinalnames[index], fps)
287
+ else:
288
+ update_status(f'Creating video with {fps} FPS...')
289
+ roop.globals.VIDEO_CHAIN_PROCESSOR.run_video_chain(v,videofinalnames[index], fps, roop.globals.execution_threads, processors, params_gen_func, roop.globals.target_path)
290
+ if os.path.isfile(videofinalnames[index]):
291
+ if util.has_extension(v, ['gif']):
292
+ gifname = util.get_destfilename_from_path(v, './output', '_fake.gif')
293
+ update_status('Creating final GIF')
294
+ util.create_gif_from_video(videofinalnames[index], gifname)
295
+ elif not roop.globals.skip_audio:
296
+ finalname = util.get_destfilename_from_path(videofinalnames[index], roop.globals.output_path, f'_final.{roop.globals.CFG.output_video_format}')
297
+ util.restore_audio(videofinalnames[index], v, finalname)
298
+ if os.path.isfile(videofinalnames[index]):
299
+ os.remove(videofinalnames[index])
300
+ else:
301
+ update_status('Failed!')
302
+
303
+
304
+ update_status('Finished')
305
+ roop.globals.target_folder_path = None
306
+
307
+
308
+ def destroy() -> None:
309
+ if roop.globals.target_path:
310
+ util.clean_temp(roop.globals.target_path)
311
+ sys.exit()
312
+
313
+
314
+ def run() -> None:
315
+ parse_args()
316
+ if not pre_check():
317
+ return
318
+ limit_resources()
319
+ roop.globals.CFG = Settings('config.yaml')
320
+ if roop.globals.headless:
321
+ start()
322
+ else:
323
+ ui.run()
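Note: `encode_execution_providers` / `decode_execution_providers` round-trip between the short names accepted on the command line (`cpu`, `cuda`, ...) and the full ONNX Runtime provider names. A standalone sketch with a hard-coded provider list (the real code queries `onnxruntime.get_available_providers()` at runtime):

```python
from typing import List

# Hard-coded for illustration; the real code asks onnxruntime which providers exist.
AVAILABLE = ['CUDAExecutionProvider', 'CPUExecutionProvider']

def encode(providers: List[str]) -> List[str]:
    # 'CUDAExecutionProvider' -> 'cuda', 'CPUExecutionProvider' -> 'cpu'
    return [p.replace('ExecutionProvider', '').lower() for p in providers]

def decode(requested: List[str]) -> List[str]:
    # map the short CLI names back to the full provider names
    return [full for full, short in zip(AVAILABLE, encode(AVAILABLE))
            if any(req in short for req in requested)]

print(decode(['cuda']))  # ['CUDAExecutionProvider']
print(decode(['cpu']))   # ['CPUExecutionProvider']
```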
roop/face_analyser.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ from typing import Any
3
+ import insightface
4
+
5
+ import roop.globals
6
+ from roop.typing import Frame
7
+ import cv2
8
+ from PIL import Image
9
+ from roop.capturer import get_video_frame
10
+
11
+ FACE_ANALYSER = None
12
+ THREAD_LOCK = threading.Lock()
13
+
14
+
15
+ def get_face_analyser() -> Any:
16
+ global FACE_ANALYSER
17
+
18
+ with THREAD_LOCK:
19
+ if FACE_ANALYSER is None:
20
+ FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
21
+ FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
22
+ return FACE_ANALYSER
23
+
24
+
25
+ def get_first_face(frame: Frame) -> Any:
26
+ faces = get_face_analyser().get(frame)
27
+ try:
28
+ return min(faces, key=lambda x: x.bbox[0])
29
+ # return sorted(faces, reverse=True, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))[0]
30
+ except ValueError:
31
+ return None
32
+
33
+
34
+ def get_all_faces(frame: Frame) -> Any:
35
+ try:
36
+ faces = get_face_analyser().get(frame)
37
+ return sorted(faces, key = lambda x : x.bbox[0])
38
+ except IndexError:
39
+ return None
40
+
41
+ def extract_face_images(source_filename, video_info):
42
+ face_data = []
43
+ source_image = None
44
+
45
+ if video_info[0]:
46
+ frame = get_video_frame(source_filename, video_info[1])
47
+ if frame is not None:
48
+ source_image = frame
49
+ else:
50
+ return face_data
51
+ else:
52
+ source_image = cv2.imread(source_filename)
53
+
54
+
55
+ faces = get_all_faces(source_image)
56
+
57
+ i = 0
58
+ for face in faces:
59
+ (startX, startY, endX, endY) = face['bbox'].astype("int")
60
+ face_temp = source_image[startY:endY, startX:endX]
61
+ if face_temp.size < 1:
62
+ continue
63
+ i += 1
64
+ face_data.append([face, face_temp])
65
+ return face_data
roop/face_helper.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ from typing import Any
3
+ import insightface
4
+
5
+ import roop.globals
6
+ from roop.typing import Frame, Face
7
+
8
+ import cv2
9
+ from PIL import Image
10
+ from roop.capturer import get_video_frame
11
+ from roop.utilities import resolve_relative_path, conditional_download
12
+
13
+ FACE_ANALYSER = None
14
+ THREAD_LOCK_ANALYSER = threading.Lock()
15
+ THREAD_LOCK_SWAPPER = threading.Lock()
16
+ FACE_SWAPPER = None
17
+
18
+
19
+ def get_face_analyser() -> Any:
20
+ global FACE_ANALYSER
21
+
22
+ with THREAD_LOCK_ANALYSER:
23
+ if FACE_ANALYSER is None:
24
+ FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
25
+ FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
26
+ return FACE_ANALYSER
27
+
28
+
29
+ def get_one_face(frame: Frame) -> Any:
30
+ try:
31
+ face = get_face_analyser().get(frame)
32
+ return min(face, key=lambda x: x.bbox[0])
33
+ except ValueError:
34
+ return None
35
+
36
+
37
+ def get_many_faces(frame: Frame) -> Any:
38
+ try:
39
+ faces = get_face_analyser().get(frame)
40
+ return sorted(faces, key = lambda x : x.bbox[0])
41
+ except IndexError:
42
+ return None
43
+
44
+ def extract_face_images(source_filename, video_info):
45
+ face_data = []
46
+ source_image = None
47
+
48
+ if video_info[0]:
49
+ frame = get_video_frame(source_filename, video_info[1])
50
+ if frame is not None:
51
+ source_image = frame
52
+ else:
53
+ return face_data
54
+ else:
55
+ source_image = cv2.imread(source_filename)
56
+
57
+
58
+ faces = get_many_faces(source_image)
59
+
60
+ i = 0
61
+ for face in faces:
62
+ (startX, startY, endX, endY) = face['bbox'].astype("int")
63
+ face_temp = source_image[startY:endY, startX:endX]
64
+ if face_temp.size < 1:
65
+ continue
66
+ i += 1
67
+ face_data.append([face, face_temp])
68
+ return face_data
69
+
70
+
71
+
72
+
73
+ def get_face_swapper() -> Any:
74
+ global FACE_SWAPPER
75
+
76
+ with THREAD_LOCK_SWAPPER:
77
+ if FACE_SWAPPER is None:
78
+ model_path = resolve_relative_path('../models/inswapper_128.onnx')
79
+ FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
80
+ return FACE_SWAPPER
81
+
82
+
83
+ def pre_check() -> bool:
84
+ download_directory_path = resolve_relative_path('../models')
85
+ conditional_download(download_directory_path, ['https://huggingface.co/countfloyd/deepfake/resolve/main/inswapper_128.onnx'])
86
+ return True
87
+
88
+
89
+ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
90
+ return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
roop/globals.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from settings import Settings
2
+ from typing import List
3
+
4
+ source_path = None
5
+ target_path = None
6
+ output_path = None
7
+ target_folder_path = None
8
+
9
+ frame_processors: List[str] = []
10
+ keep_fps = None
11
+ keep_frames = None
12
+ skip_audio = None
13
+ many_faces = None
14
+ use_batch = None
15
+ source_face_index = 0
16
+ target_face_index = 0
17
+ face_position = None
18
+ video_encoder = None
19
+ video_quality = None
20
+ max_memory = None
21
+ execution_providers: List[str] = []
22
+ execution_threads = None
23
+ headless = None
24
+ log_level = 'error'
25
+ selected_enhancer = None
26
+ face_swap_mode = None
27
+ blend_ratio = 0.5
28
+ distance_threshold = 0.65
29
+
30
+ FACE_ENHANCER = None
31
+
32
+ SELECTED_FACE_DATA_INPUT = None
33
+ SELECTED_FACE_DATA_OUTPUT = None
34
+
35
+ IMAGE_CHAIN_PROCESSOR = None
36
+ VIDEO_CHAIN_PROCESSOR = None
37
+ BATCH_IMAGE_CHAIN_PROCESSOR = None
38
+
39
+ CFG: Settings = None
40
+
41
+
roop/metadata.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ name = 'roop unleashed'
2
+ version = '2.6.6'
roop/processors/__init__.py ADDED
File without changes
roop/processors/frame/__init__.py ADDED
File without changes
roop/processors/frame/core.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import importlib
4
+ import psutil
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ from queue import Queue
7
+ from types import ModuleType
8
+ from typing import Any, List, Callable
9
+ from roop.typing import Face
10
+ from tqdm import tqdm
11
+
12
+ import roop
13
+
14
+ FRAME_PROCESSORS_MODULES: List[ModuleType] = []
15
+ FRAME_PROCESSORS_INTERFACE = [
16
+ 'pre_check',
17
+ 'pre_start',
18
+ 'process_frame',
19
+ 'process_frames',
20
+ 'process_image',
21
+ 'process_video',
22
+ 'post_process'
23
+ ]
24
+
25
+
26
+ def load_frame_processor_module(frame_processor: str) -> Any:
27
+ try:
28
+ module_name = f'roop.processors.frame.{frame_processor}'
29
+ print(f'Loading {module_name}')
30
+ frame_processor_module = importlib.import_module(module_name)
31
+ for method_name in FRAME_PROCESSORS_INTERFACE:
32
+ if not hasattr(frame_processor_module, method_name):
33
+ raise NotImplementedError
34
+ except ModuleNotFoundError:
35
+ sys.exit(f'Frame processor {frame_processor} not found.')
36
+ except NotImplementedError:
37
+ sys.exit(f'Frame processor {frame_processor} not implemented correctly.')
38
+ return frame_processor_module
39
+
40
+
41
+ def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
42
+ global FRAME_PROCESSORS_MODULES
43
+
44
+ if not FRAME_PROCESSORS_MODULES:
45
+ for frame_processor in frame_processors:
46
+ frame_processor_module = load_frame_processor_module(frame_processor)
47
+ FRAME_PROCESSORS_MODULES.append(frame_processor_module)
48
+ return FRAME_PROCESSORS_MODULES
49
+
50
+
51
+ def multi_process_frame(is_batch: bool, source_face: Face, target_face: Face, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
52
+ with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
53
+ futures = []
54
+ queue = create_queue(temp_frame_paths)
55
+ queue_per_future = max(len(temp_frame_paths) // roop.globals.execution_threads, 1)
56
+ while not queue.empty():
57
+ future = executor.submit(process_frames, is_batch, source_face, target_face, pick_queue(queue, queue_per_future), update)
58
+ futures.append(future)
59
+ for future in as_completed(futures):
60
+ future.result()
61
+
62
+
63
+ def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
64
+ queue: Queue[str] = Queue()
65
+ for frame_path in temp_frame_paths:
66
+ queue.put(frame_path)
67
+ return queue
68
+
69
+
70
+ def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
71
+ queues = []
72
+ for _ in range(queue_per_future):
73
+ if not queue.empty():
74
+ queues.append(queue.get())
75
+ return queues
76
+
77
+ def process_batch(source_face: Face, target_face: Face, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
78
+ progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
79
+ total = len(frame_paths)
80
+ with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
81
+ multi_process_frame(True, source_face, target_face, frame_paths, process_frames, lambda: update_progress(progress))
82
+
83
+
84
+ def process_video(source_face: Face, target_face: Face, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
85
+ progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
86
+ total = len(frame_paths)
87
+ with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
88
+ multi_process_frame(False, source_face, target_face, frame_paths, process_frames, lambda: update_progress(progress))
89
+
90
+
91
+ def update_progress(progress: Any = None) -> None:
92
+ process = psutil.Process(os.getpid())
93
+ memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
94
+ progress.set_postfix({
95
+ 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
96
+ 'execution_providers': roop.globals.execution_providers,
97
+ 'execution_threads': roop.globals.execution_threads
98
+ })
99
+ progress.refresh()
100
+ progress.update(1)
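Note: `multi_process_frame` above splits the frame list into chunks of roughly `len(paths) // execution_threads` items, pulled from a queue, and submits one future per chunk. A minimal standalone sketch of that pattern; the worker and file names are illustrative only:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from typing import List

def process_chunk(paths: List[str]) -> None:
    # stand-in worker; the real code runs a frame processor over each path
    for p in paths:
        print(f"processing {p}")

def run_chunked(paths: List[str], threads: int = 4) -> None:
    work: Queue = Queue()
    for p in paths:
        work.put(p)
    chunk_size = max(len(paths) // threads, 1)
    with ThreadPoolExecutor(max_workers=threads) as executor:
        futures = []
        while not work.empty():
            chunk = [work.get() for _ in range(chunk_size) if not work.empty()]
            futures.append(executor.submit(process_chunk, chunk))
        for future in as_completed(futures):
            future.result()  # re-raise any exception from the workers

run_chunked([f"frame_{i:04d}.png" for i in range(10)], threads=3)
```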
roop/processors/frame/face_swapper.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Callable
2
+ import cv2
3
+ import insightface
4
+ import threading
5
+
6
+ import roop.globals
7
+ import roop.processors.frame.core
8
+ from roop.core import update_status
9
+ from roop.face_analyser import get_first_face, get_all_faces
10
+ from roop.typing import Face, Frame
11
+ from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video, compute_cosine_distance, get_destfilename_from_path
12
+
13
+ FACE_SWAPPER = None
14
+ THREAD_LOCK = threading.Lock()
15
+ NAME = 'ROOP.FACE-SWAPPER'
16
+
17
+ DIST_THRESHOLD = 0.65
18
+
19
+
20
+ def get_face_swapper() -> Any:
21
+ global FACE_SWAPPER
22
+
23
+ with THREAD_LOCK:
24
+ if FACE_SWAPPER is None:
25
+ model_path = resolve_relative_path('../models/inswapper_128.onnx')
26
+ FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
27
+ return FACE_SWAPPER
28
+
29
+
30
+ def pre_check() -> bool:
31
+ download_directory_path = resolve_relative_path('../models')
32
+ conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx'])
33
+ return True
34
+
35
+
36
+ def pre_start() -> bool:
37
+ if not is_image(roop.globals.source_path):
38
+ update_status('Select an image for source path.', NAME)
39
+ return False
40
+ elif not get_first_face(cv2.imread(roop.globals.source_path)):
41
+ update_status('No face in source path detected.', NAME)
42
+ return False
43
+ if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
44
+ update_status('Select an image or video for target path.', NAME)
45
+ return False
46
+ return True
47
+
48
+
49
+ def post_process() -> None:
50
+ global FACE_SWAPPER
51
+
52
+ FACE_SWAPPER = None
53
+
54
+
55
+ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
56
+ return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
57
+
58
+
59
+ def process_frame(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
60
+ global DIST_THRESHOLD
61
+
62
+ if roop.globals.many_faces:
63
+ many_faces = get_all_faces(temp_frame)
64
+ if many_faces:
65
+ for target_face in many_faces:
66
+ if target_face['det_score'] > 0.65:
67
+ temp_frame = swap_face(source_face, target_face, temp_frame)
68
+ else:
69
+ if target_face:
70
+ target_embedding = target_face.embedding
71
+ many_faces = get_all_faces(temp_frame)
72
+ target_face = None
73
+ for dest_face in many_faces:
74
+ dest_embedding = dest_face.embedding
75
+ if compute_cosine_distance(target_embedding, dest_embedding) <= DIST_THRESHOLD:
76
+ target_face = dest_face
77
+ break
78
+ if target_face:
79
+ temp_frame = swap_face(source_face, target_face, temp_frame)
80
+ return temp_frame
81
+
82
+ target_face = get_first_face(temp_frame)
83
+ if target_face:
84
+ temp_frame = swap_face(source_face, target_face, temp_frame)
85
+ return temp_frame
86
+
87
+
88
+
89
+ def process_frames(is_batch: bool, source_face: Face, target_face: Face, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
90
+ for temp_frame_path in temp_frame_paths:
91
+ temp_frame = cv2.imread(temp_frame_path)
92
+ if temp_frame is not None:
93
+ result = process_frame(source_face, target_face, temp_frame)
94
+ if result is not None:
95
+ if is_batch:
96
+ tf = get_destfilename_from_path(temp_frame_path, roop.globals.output_path, '_fake.png')
97
+ cv2.imwrite(tf, result)
98
+ else:
99
+ cv2.imwrite(temp_frame_path, result)
100
+ if update:
101
+ update()
102
+
103
+
104
+ def process_image(source_face: Any, target_face: Any, target_path: str, output_path: str) -> None:
105
+ global DIST_THRESHOLD
106
+
107
+ target_frame = cv2.imread(target_path)
108
+ if target_frame is not None:
109
+ result = process_frame(source_face, target_face, target_frame)
110
+ if result is not None:
111
+ cv2.imwrite(output_path, result)
112
+
113
+
114
+ def process_video(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
115
+ global DIST_THRESHOLD
116
+
117
+ roop.processors.frame.core.process_video(source_face, target_face, temp_frame_paths, process_frames)
118
+
119
+
120
+ def process_batch_images(source_face: Any, target_face: Any, temp_frame_paths: List[str]) -> None:
121
+ global DIST_THRESHOLD
122
+
123
+ roop.processors.frame.core.process_batch(source_face, target_face, temp_frame_paths, process_frames)
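Note: the selected-face branch in `process_frame` matches faces by embedding distance against `DIST_THRESHOLD`. `compute_cosine_distance` comes from `roop.utilities` (not shown in this commit); a common definition, given here purely as an assumption, is one minus the cosine similarity of the two embeddings:

```python
import numpy as np

def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
    # assumed form: 1 - cosine similarity (not necessarily the exact roop.utilities code)
    return 1.0 - float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

emb_a = np.random.rand(512)                    # placeholder 512-d face embedding
emb_b = emb_a + 0.05 * np.random.rand(512)     # a slightly perturbed copy
print(cosine_distance(emb_a, emb_b) <= 0.65)   # within the DIST_THRESHOLD above
```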
roop/typing.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+
3
+ from insightface.app.common import Face
4
+ import numpy
5
+
6
+ Face = Face
7
+ Frame = numpy.ndarray[Any, Any]
roop/ui.json ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "CTk": {
3
+ "fg_color": ["gray95", "gray10"]
4
+ },
5
+ "CTkToplevel": {
6
+ "fg_color": ["gray95", "gray10"]
7
+ },
8
+ "CTkFrame": {
9
+ "corner_radius": 6,
10
+ "border_width": 0,
11
+ "fg_color": ["gray90", "gray13"],
12
+ "top_fg_color": ["gray85", "gray16"],
13
+ "border_color": ["gray65", "gray28"]
14
+ },
15
+ "CTkButton": {
16
+ "corner_radius": 6,
17
+ "border_width": 0,
18
+ "fg_color": ["#3a7ebf", "#1f538d"],
19
+ "hover_color": ["#325882", "#14375e"],
20
+ "border_color": ["#3E454A", "#949A9F"],
21
+ "text_color": ["#DCE4EE", "#DCE4EE"],
22
+ "text_color_disabled": ["gray74", "gray60"]
23
+ },
24
+ "CTkLabel": {
25
+ "corner_radius": 0,
26
+ "fg_color": "transparent",
27
+ "text_color": ["gray14", "gray84"]
28
+ },
29
+ "CTkEntry": {
30
+ "corner_radius": 6,
31
+ "border_width": 2,
32
+ "fg_color": ["#F9F9FA", "#343638"],
33
+ "border_color": ["#979DA2", "#565B5E"],
34
+ "text_color": ["gray14", "gray84"],
35
+ "placeholder_text_color": ["gray52", "gray62"]
36
+ },
37
+ "CTkCheckbox": {
38
+ "corner_radius": 6,
39
+ "border_width": 3,
40
+ "fg_color": ["#3a7ebf", "#1f538d"],
41
+ "border_color": ["#3E454A", "#949A9F"],
42
+ "hover_color": ["#325882", "#14375e"],
43
+ "checkmark_color": ["#DCE4EE", "gray90"],
44
+ "text_color": ["gray14", "gray84"],
45
+ "text_color_disabled": ["gray60", "gray45"]
46
+ },
47
+ "CTkSwitch": {
48
+ "corner_radius": 1000,
49
+ "border_width": 3,
50
+ "button_length": 0,
51
+ "fg_color": ["#939BA2", "#4A4D50"],
52
+ "progress_color": ["#3a7ebf", "#1f538d"],
53
+ "button_color": ["gray36", "#D5D9DE"],
54
+ "button_hover_color": ["gray20", "gray100"],
55
+ "text_color": ["gray14", "gray84"],
56
+ "text_color_disabled": ["gray60", "gray45"]
57
+ },
58
+ "CTkRadiobutton": {
59
+ "corner_radius": 1000,
60
+ "border_width_checked": 6,
61
+ "border_width_unchecked": 3,
62
+ "fg_color": ["#3a7ebf", "#1f538d"],
63
+ "border_color": ["#3E454A", "#949A9F"],
64
+ "hover_color": ["#325882", "#14375e"],
65
+ "text_color": ["gray14", "gray84"],
66
+ "text_color_disabled": ["gray60", "gray45"]
67
+ },
68
+ "CTkProgressBar": {
69
+ "corner_radius": 1000,
70
+ "border_width": 0,
71
+ "fg_color": ["#939BA2", "#4A4D50"],
72
+ "progress_color": ["#3a7ebf", "#1f538d"],
73
+ "border_color": ["gray", "gray"]
74
+ },
75
+ "CTkSlider": {
76
+ "corner_radius": 1000,
77
+ "button_corner_radius": 1000,
78
+ "border_width": 6,
79
+ "button_length": 0,
80
+ "fg_color": ["#939BA2", "#4A4D50"],
81
+ "progress_color": ["gray40", "#AAB0B5"],
82
+ "button_color": ["#3a7ebf", "#1f538d"],
83
+ "button_hover_color": ["#325882", "#14375e"]
84
+ },
85
+ "CTkOptionMenu": {
86
+ "corner_radius": 6,
87
+ "fg_color": ["#3a7ebf", "#1f538d"],
88
+ "button_color": ["#325882", "#14375e"],
89
+ "button_hover_color": ["#234567", "#1e2c40"],
90
+ "text_color": ["#DCE4EE", "#DCE4EE"],
91
+ "text_color_disabled": ["gray74", "gray60"]
92
+ },
93
+ "CTkComboBox": {
94
+ "corner_radius": 6,
95
+ "border_width": 2,
96
+ "fg_color": ["#F9F9FA", "#343638"],
97
+ "border_color": ["#979DA2", "#565B5E"],
98
+ "button_color": ["#979DA2", "#565B5E"],
99
+ "button_hover_color": ["#6E7174", "#7A848D"],
100
+ "text_color": ["gray14", "gray84"],
101
+ "text_color_disabled": ["gray50", "gray45"]
102
+ },
103
+ "CTkScrollbar": {
104
+ "corner_radius": 1000,
105
+ "border_spacing": 4,
106
+ "fg_color": "transparent",
107
+ "button_color": ["gray55", "gray41"],
108
+ "button_hover_color": ["gray40", "gray53"]
109
+ },
110
+ "CTkSegmentedButton": {
111
+ "corner_radius": 6,
112
+ "border_width": 2,
113
+ "fg_color": ["#979DA2", "gray29"],
114
+ "selected_color": ["#3a7ebf", "#1f538d"],
115
+ "selected_hover_color": ["#325882", "#14375e"],
116
+ "unselected_color": ["#979DA2", "gray29"],
117
+ "unselected_hover_color": ["gray70", "gray41"],
118
+ "text_color": ["#DCE4EE", "#DCE4EE"],
119
+ "text_color_disabled": ["gray74", "gray60"]
120
+ },
121
+ "CTkTextbox": {
122
+ "corner_radius": 6,
123
+ "border_width": 0,
124
+ "fg_color": ["gray100", "gray20"],
125
+ "border_color": ["#979DA2", "#565B5E"],
126
+ "text_color": ["gray14", "gray84"],
127
+ "scrollbar_button_color": ["gray55", "gray41"],
128
+ "scrollbar_button_hover_color": ["gray40", "gray53"]
129
+ },
130
+ "CTkScrollableFrame": {
131
+ "label_fg_color": ["gray80", "gray21"]
132
+ },
133
+ "DropdownMenu": {
134
+ "fg_color": ["gray90", "gray20"],
135
+ "hover_color": ["gray75", "gray28"],
136
+ "text_color": ["gray14", "gray84"]
137
+ },
138
+ "CTkFont": {
139
+ "macOS": {
140
+ "family": "Avenir",
141
+ "size": 12,
142
+ "weight": "normal"
143
+ },
144
+ "Windows": {
145
+ "family": "Corbel",
146
+ "size": 12,
147
+ "weight": "normal"
148
+ },
149
+ "Linux": {
150
+ "family": "Montserrat",
151
+ "size": 12,
152
+ "weight": "normal"
153
+ }
154
+ },
155
+ "RoopDonate": {
156
+ "text_color": ["#3a7ebf", "gray60"]
157
+ }
158
+ }
roop/ui.py ADDED
@@ -0,0 +1,678 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import gradio as gr
4
+ import cv2
5
+ import pathlib
6
+ import shutil
7
+ import roop.globals
8
+ import roop.metadata
9
+ import roop.utilities as util
10
+
11
+ from roop.face_helper import extract_face_images
12
+ from roop.capturer import get_video_frame, get_video_frame_total, get_image_frame
13
+
14
+ restart_server = False
15
+ live_cam_active = False
16
+
17
+ RECENT_DIRECTORY_SOURCE = None
18
+ RECENT_DIRECTORY_TARGET = None
19
+ RECENT_DIRECTORY_OUTPUT = None
20
+
21
+ SELECTION_FACES_DATA = None
22
+
23
+ last_image = None
24
+
25
+ input_thumbs = []
26
+ target_thumbs = []
27
+
28
+
29
+ IS_INPUT = True
30
+ SELECTED_FACE_INDEX = 0
31
+
32
+ SELECTED_INPUT_FACE_INDEX = 0
33
+ SELECTED_TARGET_FACE_INDEX = 0
34
+
35
+ roop.globals.keep_fps = None
36
+ roop.globals.keep_frames = None
37
+ roop.globals.skip_audio = None
38
+ roop.globals.use_batch = None
39
+
40
+ input_faces = None
41
+ target_faces = None
42
+ face_selection = None
43
+ fake_cam_image = None
44
+
45
+ current_cam_image = None
46
+ cam_swapping = False
47
+
48
+ selected_preview_index = 0
49
+
50
+
51
+ def prepare_environment():
52
+ roop.globals.output_path = os.path.abspath(os.path.join(os.getcwd(), "output"))
53
+ os.makedirs(roop.globals.output_path, exist_ok=True)
54
+ os.environ["TEMP"] = os.environ["TMP"] = os.path.abspath(os.path.join(os.getcwd(), "temp"))
55
+ os.makedirs(os.environ["TEMP"], exist_ok=True)
56
+ os.environ["GRADIO_TEMP_DIR"] = os.environ["TEMP"]
57
+
58
+
59
+ def run():
60
+ from roop.core import suggest_execution_providers
61
+ global input_faces, target_faces, face_selection, fake_cam_image, restart_server, live_cam_active, on_settings_changed
62
+
63
+ prepare_environment()
64
+
65
+ available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
66
+ image_formats = ['jpg','png', 'webp']
67
+ video_formats = ['avi','mkv', 'mp4', 'webm']
68
+ video_codecs = ['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
69
+ providerlist = suggest_execution_providers()
70
+
71
+ server_name = roop.globals.CFG.server_name
72
+ if server_name is None or len(server_name) < 1:
73
+ server_name = None
74
+ server_port = roop.globals.CFG.server_port
75
+ if server_port <= 0:
76
+ server_port = None
77
+
78
+ settings_controls = []
79
+
80
+ live_cam_active = False
81
+ run_server = True
82
+
83
+ while run_server:
84
+ with gr.Blocks(title=f'{roop.metadata.name} {roop.metadata.version}', theme=roop.globals.CFG.selected_theme, css="span {color: var(--block-info-text-color)}") as ui:
85
+ with gr.Row(variant='panel'):
86
+ gr.Markdown(f"### [{roop.metadata.name} {roop.metadata.version}](https://github.com/C0untFloyd/roop-unleashed)")
87
+ gr.HTML(util.create_version_html(), elem_id="versions")
88
+ with gr.Tab("Face Swap"):
89
+ with gr.Row():
90
+ with gr.Column():
91
+ input_faces = gr.Gallery(label="Input faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
92
+ with gr.Row():
93
+ bt_remove_selected_input_face = gr.Button("Remove selected")
94
+ bt_clear_input_faces = gr.Button("Clear all", variant='stop')
95
+ bt_srcimg = gr.Image(label='Source Face Image', type='filepath', tool=None)
96
+ with gr.Column():
97
+ target_faces = gr.Gallery(label="Target faces", allow_preview=True, preview=True, height=128, object_fit="scale-down")
98
+ with gr.Row():
99
+ bt_remove_selected_target_face = gr.Button("Remove selected")
100
+ bt_destfiles = gr.Files(label='Target File(s)', file_count="multiple", elem_id='filelist')
101
+ with gr.Row():
102
+ with gr.Column(visible=False) as dynamic_face_selection:
103
+ face_selection = gr.Gallery(label="Detected faces", allow_preview=True, preview=True, height=256, object_fit="scale-down")
104
+ with gr.Row():
105
+ bt_faceselect = gr.Button("Use selected face")
106
+ bt_cancelfaceselect = gr.Button("Cancel")
107
+
108
+ with gr.Row():
109
+ with gr.Column():
110
+ selected_face_detection = gr.Dropdown(["First found", "All faces", "Selected face", "All female", "All male"], value="First found", label="Select face swapping method")
111
+ max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold")
112
+ with gr.Column():
113
+ roop.globals.keep_fps = gr.Checkbox(label="Keep FPS", value=True)
114
+ roop.globals.keep_frames = gr.Checkbox(label="Keep Frames", value=False)
115
+ roop.globals.skip_audio = gr.Checkbox(label="Skip audio", value=False)
116
+ with gr.Row():
117
+ with gr.Column():
118
+ selected_enhancer = gr.Dropdown(["None", "Codeformer", "DMDNet", "GFPGAN"], value="None", label="Select post-processing")
119
+ with gr.Accordion(label="Masking", open=True):
120
+ chk_useclip = gr.Checkbox(label="Use Text to Clip Masking", value=False)
121
+ clip_text = gr.Textbox(label="List of objects to mask and restore back on fake image", placeholder="hands,hair")
122
+
123
+ with gr.Column():
124
+ blend_ratio = gr.Slider(0.0, 1.0, value=0.65, label="Original/Enhanced image blend ratio")
125
+ with gr.Row(variant='panel'):
126
+ with gr.Column():
127
+ bt_start = gr.Button("Start", variant='primary')
128
+ with gr.Column():
129
+ gr.Markdown(' ')
130
+ with gr.Column():
131
+ fake_preview = gr.Checkbox(label="Face swap frames", value=False)
132
+ with gr.Column():
133
+ bt_refresh_preview = gr.Button("Refresh", variant='secondary')
134
+ with gr.Row(variant='panel'):
135
+ with gr.Column():
136
+ with gr.Accordion(label="Results", open=True):
137
+ resultfiles = gr.Files(label='Processed File(s)', interactive=False)
138
+ resultimage = gr.Image(type='filepath', interactive=False)
139
+ with gr.Column():
140
+ with gr.Accordion(label="Preview Original/Fake Frame", open=True):
141
+ previewimage = gr.Image(label="Preview Image", interactive=False)
142
+ with gr.Column():
143
+ preview_frame_num = gr.Slider(0, 0, value=0, label="Frame Number", step=1.0)
144
+ bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary')
145
+
146
+ with gr.Tab("Live Cam"):
147
+ cam_toggle = gr.Checkbox(label='Activate', value=live_cam_active)
148
+ if live_cam_active:
149
+ with gr.Row():
150
+ with gr.Column():
151
+ cam = gr.Webcam(label='Camera', source='webcam', interactive=True, streaming=False)
152
+ with gr.Column():
153
+ fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False)
154
+
155
+
156
+ with gr.Tab("Extras"):
157
+ with gr.Row():
158
+ files_to_process = gr.Files(label='File(s) to process', file_count="multiple")
159
+ with gr.Row(variant='panel'):
160
+ with gr.Accordion(label="Post process", open=False):
161
+ with gr.Column():
162
+ selected_post_enhancer = gr.Dropdown(["None", "Codeformer", "GFPGAN"], value="None", label="Select post-processing")
163
+ with gr.Column():
164
+ gr.Button("Start").click(fn=lambda: gr.Info('Not yet implemented...'))
165
+ with gr.Row(variant='panel'):
166
+ with gr.Accordion(label="Video/GIF", open=False):
167
+ with gr.Row(variant='panel'):
168
+ with gr.Column():
169
+ gr.Markdown("""
170
+ # Cut video
171
+ Be aware that this means re-encoding the video which might take a longer time.
172
+ Encoding uses your configuration from the Settings Tab.
173
+ """)
174
+ with gr.Column():
175
+ cut_start_time = gr.Slider(0, 100000, value=0, label="Start Frame", step=1.0, interactive=True)
176
+ with gr.Column():
177
+ cut_end_time = gr.Slider(1, 100000, value=1, label="End Frame", step=1.0, interactive=True)
178
+ with gr.Column():
179
+ start_cut_video = gr.Button("Start")
180
+
181
+ # with gr.Row(variant='panel'):
182
+ # with gr.Column():
183
+ # gr.Markdown("""
184
+ # # Join videos
185
+ # This also re-encodes the videos like cutting above.
186
+ # """)
187
+ # with gr.Column():
188
+ # start_join_videos = gr.Button("Start")
189
+ with gr.Row(variant='panel'):
190
+ gr.Markdown("Extract frames from video")
191
+ start_extract_frames = gr.Button("Start")
192
+ with gr.Row(variant='panel'):
193
+ gr.Markdown("Create video from image files")
194
+ gr.Button("Start").click(fn=lambda: gr.Info('Not yet implemented...'))
195
+ with gr.Row(variant='panel'):
196
+ gr.Markdown("Create GIF from video")
197
+ start_create_gif = gr.Button("Create GIF")
198
+ with gr.Row():
199
+ extra_files_output = gr.Files(label='Resulting output files', file_count="multiple")
200
+
201
+
202
+ with gr.Tab("Settings"):
203
+ with gr.Row():
204
+ with gr.Column():
205
+ themes = gr.Dropdown(available_themes, label="Theme", info="Change needs complete restart", value=roop.globals.CFG.selected_theme)
206
+ with gr.Column():
207
+ settings_controls.append(gr.Checkbox(label="Public Server", value=roop.globals.CFG.server_share, elem_id='server_share', interactive=True))
208
+ settings_controls.append(gr.Checkbox(label='Clear output folder before each run', value=roop.globals.CFG.clear_output, elem_id='clear_output', interactive=True))
209
+ with gr.Column():
210
+ input_server_name = gr.Textbox(label="Server Name", lines=1, info="Leave blank to run locally", value=roop.globals.CFG.server_name)
211
+ with gr.Column():
212
+ input_server_port = gr.Number(label="Server Port", precision=0, info="Leave at 0 to use default", value=roop.globals.CFG.server_port)
213
+ with gr.Row():
214
+ with gr.Column():
215
+ max_threads = gr.Slider(1, 64, value=roop.globals.CFG.max_threads, label="Max. Number of Threads", info='default: 8', step=1.0, interactive=True)
216
+ settings_controls.append(gr.Dropdown(image_formats, label="Image Output Format", info='default: png', value=roop.globals.CFG.output_image_format, elem_id='output_image_format', interactive=True))
217
+ button_clean_temp = gr.Button("Clean temp folder")
218
+ with gr.Column():
219
+ settings_controls.append(gr.Dropdown(providerlist, label="Provider", value=roop.globals.CFG.provider, elem_id='provider', interactive=True))
220
+ settings_controls.append(gr.Dropdown(video_formats, label="Video Output Format", info='default: mp4', value=roop.globals.CFG.output_video_format, elem_id='output_video_format', interactive=True))
221
+ button_apply_settings = gr.Button("Apply Settings")
222
+ with gr.Column():
223
+ settings_controls.append(gr.Dropdown(video_codecs, label="Video Codec", info='default: libx264', value=roop.globals.CFG.output_video_codec, elem_id='output_video_codec', interactive=True))
224
+ video_quality = gr.Slider(0, 100, value=roop.globals.CFG.video_quality, label="Video Quality (crf)", info='default: 14', step=1.0, interactive=True)
225
+ with gr.Column():
226
+ button_apply_restart = gr.Button("Restart Server", variant='primary')
227
+
228
+ input_faces.select(on_select_input_face, None, None)
229
+ bt_remove_selected_input_face.click(fn=remove_selected_input_face, outputs=[input_faces])
230
+ bt_srcimg.change(fn=on_srcimg_changed, show_progress='full', inputs=bt_srcimg, outputs=[dynamic_face_selection, face_selection, input_faces])
231
+
232
+
233
+ target_faces.select(on_select_target_face, None, None)
234
+ bt_remove_selected_target_face.click(fn=remove_selected_target_face, outputs=[target_faces])
235
+
236
+ bt_destfiles.select(fn=on_destfiles_selected, inputs=[bt_destfiles], outputs=[previewimage, preview_frame_num])
237
+ bt_destfiles.clear(fn=on_clear_destfiles, outputs=[target_faces])
238
+ resultfiles.select(fn=on_resultfiles_selected, inputs=[resultfiles], outputs=[resultimage])
239
+
240
+ face_selection.select(on_select_face, None, None)
241
+ bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
242
+ bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
243
+
244
+ bt_start.click(fn=start_swap,
245
+ inputs=[selected_enhancer, selected_face_detection, roop.globals.keep_fps, roop.globals.keep_frames,
246
+ roop.globals.skip_audio, max_face_distance, blend_ratio, bt_destfiles, chk_useclip, clip_text],
247
+ outputs=[resultfiles, resultimage])
248
+
249
+ previewinputs = [preview_frame_num, bt_destfiles, fake_preview, selected_enhancer, selected_face_detection,
250
+ max_face_distance, blend_ratio, bt_destfiles, chk_useclip, clip_text]
251
+ bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage])
252
+ fake_preview.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage])
253
+ preview_frame_num.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=[previewimage], show_progress='hidden')
254
+ bt_use_face_from_preview.click(fn=on_use_face_from_selected, show_progress='full', inputs=[bt_destfiles, preview_frame_num], outputs=[dynamic_face_selection, face_selection, target_faces])
255
+
256
+
257
+ # Live Cam
258
+ cam_toggle.change(fn=on_cam_toggle, inputs=[cam_toggle])
259
+ if live_cam_active:
260
+ cam.stream(on_stream_swap_cam, inputs=[cam, selected_enhancer, blend_ratio], outputs=[fake_cam_image], show_progress="hidden")
261
+
262
+ # Extras
263
+ start_cut_video.click(fn=on_cut_video, inputs=[files_to_process, cut_start_time, cut_end_time], outputs=[extra_files_output])
264
+ # start_join_videos.click(fn=on_join_videos, inputs=[files_to_process], outputs=[extra_files_output])
265
+ start_extract_frames.click(fn=on_extract_frames, inputs=[files_to_process], outputs=[extra_files_output])
266
+ start_create_gif.click(fn=on_create_gif, inputs=[files_to_process], outputs=[extra_files_output])
267
+
268
+ # Settings
269
+ for s in settings_controls:
270
+ s.select(fn=on_settings_changed)
271
+ max_threads.input(fn=lambda a,b='max_threads':on_settings_changed_misc(a,b), inputs=[max_threads])
272
+ video_quality.input(fn=lambda a,b='video_quality':on_settings_changed_misc(a,b), inputs=[video_quality])
273
+
274
+ button_clean_temp.click(fn=clean_temp, outputs=[bt_srcimg, input_faces, target_faces, bt_destfiles])
275
+ button_apply_settings.click(apply_settings, inputs=[themes, input_server_name, input_server_port])
276
+ button_apply_restart.click(restart)
277
+
278
+
279
+
280
+ restart_server = False
281
+ try:
282
+ ui.queue().launch(inbrowser=True, server_name=server_name, server_port=server_port, prevent_thread_lock=True, show_error=True)
283
+ except:
284
+ restart_server = True
285
+ run_server = False
286
+ try:
287
+ while not restart_server:
288
+ time.sleep(5.0)
289
+ except (KeyboardInterrupt, OSError):
290
+ print("Keyboard interruption in main thread... closing server.")
291
+ run_server = False
292
+ ui.close()
293
+
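The block above launches Gradio with prevent_thread_lock=True and then parks the main thread in a polling loop; the restart_server flag is flipped by the restart() callback further down so an outer loop (not shown in this hunk) can tear the UI down and rebuild it with fresh settings. A minimal, self-contained sketch of the same pattern, with illustrative names rather than the module's own:

import time
import gradio as gr

restart_requested = False

def request_restart():
    global restart_requested
    restart_requested = True

with gr.Blocks() as demo:
    gr.Button("Restart").click(fn=request_restart)

demo.queue().launch(prevent_thread_lock=True)   # launch() returns immediately instead of blocking
while not restart_requested:                    # main thread stays free to poll flags
    time.sleep(1.0)
demo.close()                                    # caller can now rebuild the UI with new settings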
294
+ def on_settings_changed_misc(new_val, attribname):
295
+ if hasattr(roop.globals.CFG, attribname):
296
+ setattr(roop.globals.CFG, attribname, new_val)
297
+ else:
298
+ print("Didn't find attrib!")
299
+
300
+
301
+
302
+ def on_settings_changed(evt: gr.SelectData):
303
+ attribname = evt.target.elem_id
304
+ if isinstance(evt.target, gr.Checkbox):
305
+ if hasattr(roop.globals.CFG, attribname):
306
+ setattr(roop.globals.CFG, attribname, evt.selected)
307
+ return
308
+ elif isinstance(evt.target, gr.Dropdown):
309
+ if hasattr(roop.globals.CFG, attribname):
310
+ setattr(roop.globals.CFG, attribname, evt.value)
311
+ return
312
+
313
+ raise gr.Error(f'Unhandled Setting for {evt.target}')
314
+
315
+
316
+
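The two handlers above rely on a simple convention: each settings control's elem_id matches an attribute name on the config object, so one generic callback can persist any of them with setattr. A minimal sketch of that convention (SimpleNamespace stands in for roop.globals.CFG, which is richer in the real code):

from types import SimpleNamespace

CFG = SimpleNamespace(output_image_format='png', provider='cuda')

def on_setting_selected(elem_id: str, new_value):
    # mirrors on_settings_changed: the control's elem_id doubles as the attribute name
    if hasattr(CFG, elem_id):
        setattr(CFG, elem_id, new_value)
    else:
        raise KeyError(f'Unhandled setting: {elem_id}')

on_setting_selected('output_image_format', 'jpg')
print(CFG.output_image_format)   # -> jpg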
317
+ def on_srcimg_changed(imgsrc, progress=gr.Progress()):
318
+ global RECENT_DIRECTORY_SOURCE, SELECTION_FACES_DATA, IS_INPUT, input_faces, face_selection, input_thumbs, last_image
319
+
320
+ IS_INPUT = True
321
+
322
+ if imgsrc is None or last_image == imgsrc:
323
+ return gr.Column.update(visible=False), None, input_thumbs
324
+
325
+ last_image = imgsrc
326
+
327
+ progress(0, desc="Retrieving faces from image", )
328
+ source_path = imgsrc
329
+ thumbs = []
330
+ if util.is_image(source_path):
331
+ roop.globals.source_path = source_path
332
+ RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
333
+ SELECTION_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0))
334
+ progress(0.5, desc="Retrieving faces from image")
335
+ for f in SELECTION_FACES_DATA:
336
+ image = convert_to_gradio(f[1])
337
+ thumbs.append(image)
338
+
339
+ progress(1.0, desc="Retrieving faces from image")
340
+ if len(thumbs) < 1:
341
+ raise gr.Error('No faces detected!')
342
+
343
+ if len(thumbs) == 1:
344
+ roop.globals.SELECTED_FACE_DATA_INPUT = SELECTION_FACES_DATA[0][0]
345
+ input_thumbs.append(thumbs[0])
346
+ return gr.Column.update(visible=False), None, input_thumbs
347
+
348
+ return gr.Column.update(visible=True), thumbs, gr.Gallery.update(visible=True)
349
+
350
+ def on_select_input_face(evt: gr.SelectData):
351
+ global SELECTED_INPUT_FACE_INDEX
352
+
353
+ SELECTED_INPUT_FACE_INDEX = evt.index
354
+
355
+ def remove_selected_input_face():
356
+ global input_thumbs, SELECTED_INPUT_FACE_INDEX
357
+
358
+ if len(input_thumbs) > SELECTED_INPUT_FACE_INDEX:
359
+ f = input_thumbs.pop(SELECTED_INPUT_FACE_INDEX)
360
+ del f
361
+
362
+ return input_thumbs
363
+
364
+ def on_select_target_face(evt: gr.SelectData):
365
+ global SELECTED_TARGET_FACE_INDEX
366
+
367
+ SELECTED_TARGET_FACE_INDEX = evt.index
368
+
369
+ def remove_selected_target_face():
370
+ global target_thumbs, SELECTED_TARGET_FACE_INDEX
371
+
372
+ if len(target_thumbs) > SELECTED_TARGET_FACE_INDEX:
373
+ f = target_thumbs.pop(SELECTED_TARGET_FACE_INDEX)
374
+ del f
375
+ return target_thumbs
376
+
377
+
378
+
379
+
380
+
381
+ def on_use_face_from_selected(files, frame_num):
382
+ global IS_INPUT, SELECTION_FACES_DATA
383
+
384
+ IS_INPUT = False
385
+ thumbs = []
386
+
387
+ roop.globals.target_path = files[selected_preview_index].name
388
+ if util.is_image(roop.globals.target_path) and not roop.globals.target_path.lower().endswith(('gif')):
389
+ SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (False, 0))
390
+ if len(SELECTION_FACES_DATA) > 0:
391
+ for f in SELECTION_FACES_DATA:
392
+ image = convert_to_gradio(f[1])
393
+ thumbs.append(image)
394
+ else:
395
+ gr.Info('No faces detected!')
396
+ roop.globals.target_path = None
397
+
398
+ elif util.is_video(roop.globals.target_path) or roop.globals.target_path.lower().endswith(('gif')):
399
+ selected_frame = frame_num
400
+ SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (True, selected_frame))
401
+ if len(SELECTION_FACES_DATA) > 0:
402
+ for f in SELECTION_FACES_DATA:
403
+ image = convert_to_gradio(f[1])
404
+ thumbs.append(image)
405
+ else:
406
+ gr.Info('No faces detected!')
407
+ roop.globals.target_path = None
408
+
409
+ if len(thumbs) == 1:
410
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = SELECTION_FACES_DATA[0][0]
411
+ target_thumbs.append(thumbs[0])
412
+ return gr.Row.update(visible=False), None, target_thumbs
413
+
414
+ return gr.Row.update(visible=True), thumbs, gr.Gallery.update(visible=True)
415
+
416
+
417
+
418
+ def on_select_face(evt: gr.SelectData): # SelectData is a subclass of EventData
419
+ global SELECTED_FACE_INDEX
420
+ SELECTED_FACE_INDEX = evt.index
421
+
422
+
423
+ def on_selected_face():
424
+ global IS_INPUT, SELECTED_FACE_INDEX, SELECTION_FACES_DATA, input_thumbs, target_thumbs
425
+
426
+ fd = SELECTION_FACES_DATA[SELECTED_FACE_INDEX]
427
+ image = convert_to_gradio(fd[1])
428
+ if IS_INPUT:
429
+ roop.globals.SELECTED_FACE_DATA_INPUT = fd[0]
430
+ input_thumbs.append(image)
431
+ return gr.Column.update(visible=False), None, input_thumbs, gr.Gallery.update(visible=True)
432
+ else:
433
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = fd[0]
434
+ target_thumbs.append(image)
435
+ return gr.Column.update(visible=False), None, gr.Gallery.update(visible=True), target_thumbs
436
+
437
+ # bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
438
+
439
+
440
+
441
+
442
+ def on_preview_frame_changed(frame_num, files, fake_preview, enhancer, detection, face_distance, blend_ratio, target_files, use_clip, clip_text):
443
+ from roop.core import live_swap
444
+
445
+ filename = files[selected_preview_index].name
446
+ if util.is_video(filename) or filename.lower().endswith('gif'):
447
+ current_frame = get_video_frame(filename, frame_num)
448
+ else:
449
+ current_frame = get_image_frame(filename)
450
+ if current_frame is None:
451
+ return None
452
+
453
+ if not fake_preview or roop.globals.SELECTED_FACE_DATA_INPUT is None:
454
+ return convert_to_gradio(current_frame)
455
+
456
+ roop.globals.face_swap_mode = translate_swap_mode(detection)
457
+ roop.globals.selected_enhancer = enhancer
458
+ roop.globals.distance_threshold = face_distance
459
+ roop.globals.blend_ratio = blend_ratio
460
+
461
+ if use_clip and (clip_text is None or len(clip_text) < 1):
462
+ use_clip = False
463
+
464
+ roop.globals.execution_threads = roop.globals.CFG.max_threads
465
+ current_frame = live_swap(current_frame, roop.globals.face_swap_mode, use_clip, clip_text)
466
+ if current_frame is None:
467
+ return None
468
+ return convert_to_gradio(current_frame)
469
+
470
+
471
+
472
+
473
+ def on_clear_input_faces():
474
+ global input_thumbs
475
+
476
+ input_thumbs = []
477
+ roop.globals.SELECTED_FACE_DATA_INPUT = None
478
+ return input_thumbs
479
+
480
+ def on_clear_destfiles():
481
+ global target_thumbs
482
+
483
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = None
484
+ target_thumbs = []
485
+ return target_thumbs
486
+
487
+
488
+
489
+ def translate_swap_mode(dropdown_text):
490
+ if dropdown_text == "Selected face":
491
+ return "selected"
492
+ elif dropdown_text == "First found":
493
+ return "first"
494
+ elif dropdown_text == "All female":
495
+ return "all_female"
496
+ elif dropdown_text == "All male":
497
+ return "all_male"
498
+
499
+ return "all"
500
+
501
+
502
+
503
+ def start_swap(enhancer, detection, keep_fps, keep_frames, skip_audio, face_distance, blend_ratio, target_files, use_clip, clip_text):
504
+ from roop.core import batch_process
505
+
506
+ if target_files is None or len(target_files) <= 0:
507
+ return None, None
508
+
509
+ if roop.globals.CFG.clear_output:
510
+ shutil.rmtree(roop.globals.output_path)
511
+
512
+ prepare_environment()
513
+
514
+ roop.globals.selected_enhancer = enhancer
515
+ roop.globals.target_path = None
516
+ roop.globals.distance_threshold = face_distance
517
+ roop.globals.blend_ratio = blend_ratio
518
+ roop.globals.keep_fps = keep_fps
519
+ roop.globals.keep_frames = keep_frames
520
+ roop.globals.skip_audio = skip_audio
521
+ roop.globals.face_swap_mode = translate_swap_mode(detection)
522
+ if use_clip and (clip_text is None or len(clip_text) < 1):
523
+ use_clip = False
524
+
525
+ if roop.globals.face_swap_mode == 'selected':
526
+ if roop.globals.SELECTED_FACE_DATA_OUTPUT is None or len(roop.globals.SELECTED_FACE_DATA_OUTPUT) < 1:
527
+ gr.Error('No Target Face selected!')
528
+ return None, None
529
+
530
+ roop.globals.execution_threads = roop.globals.CFG.max_threads
531
+ roop.globals.video_encoder = roop.globals.CFG.output_video_codec
532
+ roop.globals.video_quality = roop.globals.CFG.video_quality
533
+
534
+ batch_process([file.name for file in target_files], use_clip, clip_text)
535
+ outdir = pathlib.Path(roop.globals.output_path)
536
+ outfiles = [item for item in outdir.iterdir() if item.is_file()]
537
+ if len(outfiles) > 0:
538
+ return outfiles, outfiles[0]
539
+ return None, None
540
+
541
+
542
+
543
+ def on_destfiles_selected(evt: gr.SelectData, target_files):
544
+ global selected_preview_index
545
+
546
+ selected_preview_index = evt.index
547
+ filename = target_files[selected_preview_index].name
548
+ if util.is_video(filename) or filename.lower().endswith('gif'):
549
+ current_frame = get_video_frame(filename, 0)
550
+ total_frames = get_video_frame_total(filename)
551
+ else:
552
+ current_frame = get_image_frame(filename)
553
+ total_frames = 0
554
+
555
+ current_frame = convert_to_gradio(current_frame)
556
+ return current_frame, gr.Slider.update(value=0, maximum=total_frames)
557
+
558
+
559
+ def on_resultfiles_selected(evt: gr.SelectData, files):
560
+ selected_index = evt.index
561
+ filename = files[selected_index].name
562
+ if util.is_video(filename) or filename.lower().endswith('gif'):
563
+ current_frame = get_video_frame(filename, 0)
564
+ else:
565
+ current_frame = get_image_frame(filename)
566
+ return convert_to_gradio(current_frame)
567
+
568
+
569
+
570
+ def on_cam_toggle(state):
571
+ global live_cam_active, restart_server
572
+
573
+ live_cam_active = state
574
+ gr.Warning('Server will be restarted for this change!')
575
+ restart_server = True
576
+
577
+
578
+ def on_stream_swap_cam(camimage, enhancer, blend_ratio):
579
+ from roop.core import live_swap
580
+ global current_cam_image, cam_counter, cam_swapping, fake_cam_image
581
+
582
+ roop.globals.selected_enhancer = enhancer
583
+ roop.globals.blend_ratio = blend_ratio
584
+
585
+ if not cam_swapping and roop.globals.SELECTED_FACE_DATA_INPUT is not None:
586
+ cam_swapping = True
587
+ current_cam_image = live_swap(camimage, "all", False, None)
588
+ cam_swapping = False
589
+
590
+ return current_cam_image
591
+
592
+
593
+ def on_cut_video(files, cut_start_frame, cut_end_frame):
594
+ resultfiles = []
595
+ for tf in files:
596
+ f = tf.name
597
+ # destfile = get_destfilename_from_path(f, resolve_relative_path('./output'), '_cut')
598
+ destfile = util.get_destfilename_from_path(f, './output', '_cut')
599
+ util.cut_video(f, destfile, cut_start_frame, cut_end_frame)
600
+ if os.path.isfile(destfile):
601
+ resultfiles.append(destfile)
602
+ else:
603
+ gr.Error('Cutting video failed!')
604
+ return resultfiles
605
+
606
+ def on_join_videos(files):
607
+ filenames = []
608
+ for f in files:
609
+ filenames.append(f.name)
610
+ destfile = util.get_destfilename_from_path(filenames[0], './output', '_join')
611
+ util.join_videos(filenames, destfile)
612
+ resultfiles = []
613
+ if os.path.isfile(destfile):
614
+ resultfiles.append(destfile)
615
+ else:
616
+ gr.Error('Joining videos failed!')
617
+ return resultfiles
618
+
619
+
620
+
621
+
622
+ def on_extract_frames(files):
623
+ resultfiles = []
624
+ for tf in files:
625
+ f = tf.name
626
+ resfolder = util.extract_frames(f)
627
+ for file in os.listdir(resfolder):
628
+ outfile = os.path.join(resfolder, file)
629
+ if os.path.isfile(outfile):
630
+ resultfiles.append(outfile)
631
+ return resultfiles
632
+
633
+
634
+ def on_create_gif(files):
635
+ for tf in files:
636
+ f = tf.name
637
+ gifname = util.get_destfilename_from_path(f, './output', '.gif')
638
+ util.create_gif_from_video(f, gifname)
639
+
640
+ return gifname
641
+
642
+
643
+
644
+
645
+
646
+ def clean_temp():
647
+ global input_thumbs, target_thumbs
648
+
649
+ shutil.rmtree(os.environ["TEMP"])
650
+ prepare_environment()
651
+
652
+ input_thumbs = []
653
+ roop.globals.SELECTED_FACE_DATA_INPUT = None
654
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = None
655
+ target_thumbs = []
656
+ gr.Info('Temp Files removed')
657
+ return None,None,None,None
658
+
659
+
660
+ def apply_settings(themes, input_server_name, input_server_port):
661
+ roop.globals.CFG.selected_theme = themes
662
+ roop.globals.CFG.server_name = input_server_name
663
+ roop.globals.CFG.server_port = input_server_port
664
+ roop.globals.CFG.save()
665
+ gr.Info('Settings saved')
666
+
667
+ def restart():
668
+ global restart_server
669
+ restart_server = True
670
+
671
+
672
+
673
+
674
+ # Gradio wants Images in RGB
675
+ def convert_to_gradio(image):
676
+ if image is None:
677
+ return None
678
+ return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
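convert_to_gradio exists because OpenCV stores frames in BGR channel order while Gradio, like PIL, expects RGB. A quick standalone sanity check of the conversion (not part of the repository):

import numpy as np
import cv2

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255                                                  # pure blue in BGR order
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
assert rgb[0, 0].tolist() == [0, 0, 255]                           # blue lands in the last (R,G,B) slot
assert np.array_equal(cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR), bgr)   # round-trip is lossless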
roop/ui_tk.py ADDED
@@ -0,0 +1,429 @@
1
+ import os
2
+ import customtkinter as ctk
3
+ import webbrowser
4
+ import cv2
5
+ import roop.globals
6
+ import roop.metadata
7
+
8
+ from typing import Callable, Tuple
9
+ from PIL import Image, ImageOps
10
+ from roop.face_helper import get_many_faces, get_one_face, extract_face_images
11
+ from roop.capturer import get_video_frame, get_video_frame_total
12
+ from roop.utilities import is_image, is_video, resolve_relative_path, open_with_default_app, compute_cosine_distance, has_extension
13
+
14
+ ROOT = None
15
+ ROOT_HEIGHT = 550
16
+ ROOT_WIDTH = 600
17
+
18
+ PREVIEW = None
19
+ PREVIEW_MAX_HEIGHT = 700
20
+ PREVIEW_MAX_WIDTH = 1200
21
+ IMAGE_BUTTON_WIDTH = 200
22
+ IMAGE_BUTTON_HEIGHT = 200
23
+
24
+ RECENT_DIRECTORY_SOURCE = None
25
+ RECENT_DIRECTORY_TARGET = None
26
+ RECENT_DIRECTORY_OUTPUT = None
27
+
28
+ preview_label = None
29
+ preview_slider = None
30
+ source_label = None
31
+ target_label = None
32
+ status_label = None
33
+ FACE_BUTTONS = []
34
+ INPUT_FACES_DATA = None
35
+ OUTPUT_FACES_DATA = None
36
+
37
+
38
+ def init(start: Callable, destroy: Callable) -> ctk.CTk:
39
+ global ROOT, PREVIEW, FACE_SELECT
40
+
41
+ ROOT = create_root(start, destroy)
42
+ PREVIEW = create_preview(ROOT)
43
+ FACE_SELECT = create_select_faces_win(ROOT)
44
+ return ROOT
45
+
46
+
47
+
48
+ def create_root(start: Callable, destroy: Callable) -> ctk.CTk:
49
+ global source_button, target_button, status_label
50
+
51
+ ctk.deactivate_automatic_dpi_awareness()
52
+ ctk.set_appearance_mode('system')
53
+ ctk.set_default_color_theme(resolve_relative_path('ui.json'))
54
+ root = ctk.CTk()
55
+ root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
56
+ root.title(f'{roop.metadata.name} {roop.metadata.version}')
57
+ root.configure()
58
+ root.protocol('WM_DELETE_WINDOW', lambda: destroy())
59
+
60
+ base_x1 = 0.075
61
+ base_x2 = 0.575
62
+ base_y = 0.635
63
+
64
+ source_button = ctk.CTkButton(root, text='Select image with face(s)', width=IMAGE_BUTTON_WIDTH, height=IMAGE_BUTTON_HEIGHT, compound='top', anchor='center', command=lambda: select_source_path())
65
+ source_button.place(relx=base_x1, rely=0.05)
66
+
67
+ target_button = ctk.CTkButton(root, text='Select target image/video', width=IMAGE_BUTTON_WIDTH, height=IMAGE_BUTTON_HEIGHT, compound='top', anchor='center', command=lambda: select_target_path())
68
+ target_button.place(relx=base_x2, rely=0.05)
69
+
70
+ enhance_label = ctk.CTkLabel(root, text='Select face enhancement engine', anchor='w')
71
+ enhance_label.place(relx=base_x1, rely=0.49)
72
+ enhance_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
73
+
74
+ enhancer_cb = ctk.CTkComboBox(root, values=["None", "Codeformer", "DMDNet (unavailable)", "GFPGAN"], width=IMAGE_BUTTON_WIDTH, command=select_enhancer)
75
+ enhancer_cb.set("None")
76
+ enhancer_cb.place(relx=base_x1, rely=0.532)
77
+
78
+ keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
79
+ keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
80
+ keep_fps_checkbox.place(relx=base_x1, rely=base_y)
81
+
82
+ keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
83
+ keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
84
+ keep_frames_switch.place(relx=base_x1, rely=0.68)
85
+
86
+ skip_audio_value = ctk.BooleanVar(value=roop.globals.skip_audio)
87
+ skip_audio_switch = ctk.CTkSwitch(root, text='Skip audio', variable=skip_audio_value, command=lambda: setattr(roop.globals, 'skip_audio', skip_audio_value.get()))
88
+ skip_audio_switch.place(relx=base_x2, rely=base_y)
89
+
90
+ many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
91
+ many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
92
+ many_faces_switch.place(relx=base_x2, rely=0.68)
93
+
94
+ use_batch_value = ctk.BooleanVar(value=roop.globals.use_batch)
95
+ use_batch_switch = ctk.CTkSwitch(root, text='Batch process folder', variable=use_batch_value, command=lambda: setattr(roop.globals, 'use_batch', use_batch_value.get()))
96
+ use_batch_switch.place(relx=base_x1, rely=0.725)
97
+
98
+
99
+
100
+ base_y = 0.84
101
+
102
+ start_button = ctk.CTkButton(root, text='Start', command=lambda: select_output_path(start))
103
+ start_button.place(relx=base_x1, rely=base_y, relwidth=0.15, relheight=0.05)
104
+
105
+ stop_button = ctk.CTkButton(root, text='Destroy', command=lambda: destroy())
106
+ stop_button.place(relx=0.35, rely=base_y, relwidth=0.15, relheight=0.05)
107
+
108
+ preview_button = ctk.CTkButton(root, text='Preview', command=lambda: toggle_preview())
109
+ preview_button.place(relx=0.55, rely=base_y, relwidth=0.15, relheight=0.05)
110
+
111
+ result_button = ctk.CTkButton(root, text='Show Result', command=lambda: show_result())
112
+ result_button.place(relx=0.75, rely=base_y, relwidth=0.15, relheight=0.05)
113
+
114
+ status_label = ctk.CTkLabel(root, text=None, justify='center')
115
+ status_label.place(relx=base_x1, rely=0.9, relwidth=0.8)
116
+
117
+ donate_label = ctk.CTkLabel(root, text='Visit the Github Page', justify='center', cursor='hand2')
118
+ donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
119
+ donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
120
+ donate_label.bind('<Button>', lambda event: webbrowser.open('https://github.com/C0untFloyd/roop-unleashed'))
121
+
122
+ return root
123
+
127
+ def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
128
+ global preview_label, preview_slider
129
+
130
+ preview = ctk.CTkToplevel(parent)
131
+ preview.withdraw()
132
+ preview.title('Preview')
133
+ preview.configure()
134
+ preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview())
135
+ preview.resizable(width=False, height=False)
136
+
137
+ preview_label = ctk.CTkLabel(preview, text=None)
138
+ preview_label.pack(fill='both', expand=True)
139
+
140
+ preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value))
141
+
142
+ return preview
143
+
144
+ def update_status(text: str) -> None:
145
+ status_label.configure(text=text)
146
+ ROOT.update()
147
+
151
+
152
+
153
+ def select_source_path() -> None:
154
+ global RECENT_DIRECTORY_SOURCE, INPUT_FACES_DATA
155
+
156
+ PREVIEW.withdraw()
157
+ source_path = ctk.filedialog.askopenfilename(title='Select source image', initialdir=RECENT_DIRECTORY_SOURCE)
158
+ image = None
159
+ if is_image(source_path):
160
+ roop.globals.source_path = source_path
161
+ RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
162
+ INPUT_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0))
163
+ if len(INPUT_FACES_DATA) > 0:
164
+ if len(INPUT_FACES_DATA) == 1:
165
+ image = render_face_from_frame(INPUT_FACES_DATA[0][1], (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
166
+ roop.globals.SELECTED_FACE_DATA_INPUT = INPUT_FACES_DATA[0][0]
167
+ else:
168
+ show_face_selection(INPUT_FACES_DATA, True)
169
+ else:
170
+ print('No face found!')
171
+ roop.globals.source_path = None
172
+ else:
173
+ roop.globals.source_path = None
174
+ source_button.configure(image=image)
175
+ source_button._draw()
176
+
177
+
178
+ def select_target_path() -> None:
179
+ global RECENT_DIRECTORY_TARGET, OUTPUT_FACES_DATA
180
+
181
+ PREVIEW.withdraw()
182
+ target_path = ctk.filedialog.askopenfilename(title='Select target image or video', initialdir=RECENT_DIRECTORY_TARGET)
183
+ image = None
184
+ if is_image(target_path) and not target_path.lower().endswith(('gif')):
185
+ roop.globals.target_path = target_path
186
+ RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
187
+ if roop.globals.many_faces:
188
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = None
189
+ image = render_image_preview(target_path, (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
190
+ else:
191
+ OUTPUT_FACES_DATA = extract_face_images(roop.globals.target_path, (False, 0))
192
+ if len(OUTPUT_FACES_DATA) > 0:
193
+ if len(OUTPUT_FACES_DATA) == 1:
194
+ image = render_face_from_frame(OUTPUT_FACES_DATA[0][1], (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
195
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = OUTPUT_FACES_DATA[0][0]
196
+ if roop.globals.SELECTED_FACE_DATA_INPUT is not None:
197
+ emb1 = roop.globals.SELECTED_FACE_DATA_INPUT.embedding
198
+ emb2 = roop.globals.SELECTED_FACE_DATA_OUTPUT.embedding
199
+ dist = compute_cosine_distance(emb1, emb2)
200
+ print(f'Similarity Distance between Source->Target={dist}')
201
+ else:
202
+ show_face_selection(OUTPUT_FACES_DATA, False)
203
+ else:
204
+ print('No face found!')
205
+ roop.globals.target_path = None
206
+
207
+ elif is_video(target_path) or target_path.lower().endswith(('gif')):
208
+ roop.globals.target_path = target_path
209
+ RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
210
+ if roop.globals.many_faces:
211
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = None
212
+ image = render_video_preview(target_path, (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
213
+ else:
214
+ max_frame = get_video_frame_total(roop.globals.target_path)
215
+ dialog = ctk.CTkInputDialog(text=f"Please input frame number with target face (1 - {max_frame})", title="Extract Face from Video")
216
+ selected_frame = dialog.get_input()
217
+ try:
218
+ selected_frame = int(selected_frame)
219
+ except:
220
+ selected_frame = 1
221
+
222
+ selected_frame = max(selected_frame, 1)
223
+ selected_frame = min(selected_frame, max_frame)
224
+ OUTPUT_FACES_DATA = extract_face_images(roop.globals.target_path, (True, selected_frame))
225
+ if len(OUTPUT_FACES_DATA) > 0:
226
+ if len(OUTPUT_FACES_DATA) == 1:
227
+ image = render_face_from_frame(OUTPUT_FACES_DATA[0][1], (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
228
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = OUTPUT_FACES_DATA[0][0]
229
+ else:
230
+ show_face_selection(OUTPUT_FACES_DATA, False)
231
+ else:
232
+ roop.globals.target_path = None
233
+
234
+ else:
235
+ roop.globals.target_path = None
236
+
237
+ target_button.configure(image=image)
238
+ target_button._draw()
239
+
240
+
241
+
245
+
246
+ def params_gen_func(proc, frame):
247
+ return {"original_frame": frame, "blend_ratio": 0.5, "swap_mode": "all", "input_face_datas": [roop.globals.SELECTED_FACE_DATA_INPUT], "target_face_datas": [roop.globals.SELECTED_FACE_DATA_OUTPUT]}
248
+
249
+
250
+ def select_output_path(start: Callable[[], None]) -> None:
251
+ global RECENT_DIRECTORY_OUTPUT
252
+
253
+ if roop.globals.use_batch:
254
+ roop.globals.target_folder_path = ctk.filedialog.askdirectory(title='Select folder to batch process')
255
+ print(f'Batch process folder set to {roop.globals.target_folder_path}')
256
+ else:
257
+ roop.globals.target_folder_path = None
258
+
259
+ if roop.globals.target_folder_path is not None:
260
+ output_path = ctk.filedialog.askdirectory(title='Select output folder')
261
+ elif is_image(roop.globals.target_path) and not has_extension(roop.globals.target_path, ['gif']):
262
+ output_path = ctk.filedialog.asksaveasfilename(title='Save image output file', defaultextension='.png', initialfile='output.png', initialdir=RECENT_DIRECTORY_OUTPUT)
263
+ elif is_video(roop.globals.target_path) or has_extension(roop.globals.target_path, ['gif']):
264
+ output_path = ctk.filedialog.asksaveasfilename(title='Save video output file', defaultextension='.mp4', initialfile='output.mp4', initialdir=RECENT_DIRECTORY_OUTPUT)
265
+ else:
266
+ output_path = None
267
+
268
+ if output_path:
269
+ roop.globals.output_path = output_path
270
+ RECENT_DIRECTORY_OUTPUT = os.path.dirname(roop.globals.output_path)
271
+ start()
272
+
273
+
274
+ def select_enhancer(choice):
275
+ roop.globals.selected_enhancer = choice
276
+
277
+
278
+ def show_result():
279
+ open_with_default_app(roop.globals.output_path)
280
+
281
+
282
+
283
+ def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
284
+ image = Image.open(image_path)
285
+ if size:
286
+ image = ImageOps.fit(image, size, Image.LANCZOS)
287
+ return ctk.CTkImage(image, size=image.size)
288
+
289
+
290
+ def render_face_from_frame(face, size: Tuple[int, int] = None) -> ctk.CTkImage:
291
+ image = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
292
+ image = ImageOps.fit(image, (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT), Image.LANCZOS)
293
+ return ctk.CTkImage(image, size=image.size)
294
+
295
+
296
+ def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: int = 0) -> ctk.CTkImage:
297
+ capture = cv2.VideoCapture(video_path)
298
+ if frame_number:
299
+ capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
300
+ has_frame, frame = capture.read()
301
+ if has_frame:
302
+ image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
303
+ if size:
304
+ image = ImageOps.fit(image, size, Image.LANCZOS)
305
+ return ctk.CTkImage(image, size=image.size)
306
+ capture.release()
307
+ cv2.destroyAllWindows()
308
+
309
+
310
+ def toggle_preview() -> None:
311
+ if PREVIEW.state() == 'normal':
312
+ PREVIEW.withdraw()
313
+ elif roop.globals.source_path and roop.globals.target_path:
314
+ init_preview()
315
+ update_preview()
316
+ PREVIEW.deiconify()
317
+
318
+
319
+ def init_preview() -> None:
320
+ if is_image(roop.globals.target_path):
321
+ preview_slider.pack_forget()
322
+ if is_video(roop.globals.target_path):
323
+ video_frame_total = get_video_frame_total(roop.globals.target_path)
324
+ preview_slider.configure(to=video_frame_total)
325
+ preview_slider.pack(fill='x')
326
+ preview_slider.set(0)
327
+
328
+
329
+ def update_preview(frame_number: int = 0) -> None:
330
+ if roop.globals.source_path and roop.globals.target_path:
331
+ temp_frame = get_video_frame(roop.globals.target_path, frame_number)
332
+ swap_mode = "selected"
333
+ processors = "faceswap"
334
+ if roop.globals.selected_enhancer == 'GFPGAN':
335
+ processors += ",gfpgan"
336
+ elif roop.globals.selected_enhancer == 'Codeformer':
337
+ processors += ",codeformer"
338
+
339
+ temp_frame, _ = roop.globals.IMAGE_CHAIN_PROCESSOR.run_chain(temp_frame,
340
+ {"swap_mode": swap_mode,
341
+ "input_face_datas": [roop.globals.SELECTED_FACE_DATA_INPUT], "target_face_datas": [roop.globals.SELECTED_FACE_DATA_OUTPUT]},
342
+ processors)
343
+ image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
344
+ image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS)
345
+ image = ctk.CTkImage(image, size=image.size)
346
+ preview_label.configure(image=image)
347
+
348
+
349
+ def create_select_faces_win(parent) -> ctk.CTkToplevel:
350
+ global scrollable_frame
351
+
352
+ face_win = ctk.CTkToplevel(parent)
353
+ face_win.minsize(800, 400)
354
+ face_win.title('Select Face(s)')
355
+ face_win.configure()
356
+ face_win.withdraw()
357
+ face_win.protocol('WM_DELETE_WINDOW', lambda: cancel_face_selection())
358
+ scrollable_frame = ctk.CTkScrollableFrame(face_win, orientation='horizontal', label_text='Choose face by clicking on it', width=(IMAGE_BUTTON_WIDTH + 40)*3, height=IMAGE_BUTTON_HEIGHT+32)
359
+ scrollable_frame.grid(row=0, column=0, padx=20, pady=20)
360
+ scrollable_frame.place(relx=0.05, rely=0.05)
361
+ cancel_button = ctk.CTkButton(face_win, text='Cancel', command=lambda: cancel_face_selection())
362
+ cancel_button.place(relx=0.05, rely=0.85, relwidth=0.075, relheight=0.075)
363
+
364
+ return face_win
365
+
366
+ def cancel_face_selection() -> None:
367
+ toggle_face_selection()
368
+ ROOT.wm_attributes('-disabled', False)
369
+ ROOT.focus()
370
+
371
+ def select_face(index, is_input) -> None:
372
+ global source_button, target_button, INPUT_FACES_DATA, OUTPUT_FACES_DATA
373
+
374
+ if is_input:
375
+ roop.globals.SELECTED_FACE_DATA_INPUT = INPUT_FACES_DATA[index][0]
376
+ image = render_face_from_frame(INPUT_FACES_DATA[index][1], (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
377
+ source_button.configure(image=image)
378
+ source_button._draw()
379
+ else:
380
+ roop.globals.SELECTED_FACE_DATA_OUTPUT = OUTPUT_FACES_DATA[index][0]
381
+ image = render_face_from_frame(OUTPUT_FACES_DATA[index][1], (IMAGE_BUTTON_WIDTH, IMAGE_BUTTON_HEIGHT))
382
+ target_button.configure(image=image)
383
+ target_button._draw()
384
+ if roop.globals.SELECTED_FACE_DATA_INPUT is not None:
385
+ emb1 = roop.globals.SELECTED_FACE_DATA_INPUT.embedding
386
+ emb2 = roop.globals.SELECTED_FACE_DATA_OUTPUT.embedding
387
+ dist = compute_cosine_distance(emb1, emb2)
388
+ print(f'Similarity Distance between Source->Target={dist}')
389
+
390
+ toggle_face_selection()
391
+ ROOT.wm_attributes('-disabled', False)
392
+ ROOT.focus()
393
+
394
+
395
+
396
+ def toggle_face_selection() -> None:
397
+ if FACE_SELECT.state() == 'normal':
398
+ FACE_SELECT.withdraw()
399
+ else:
400
+ FACE_SELECT.deiconify()
401
+
402
+
403
+ def show_face_selection(faces, is_input):
404
+ global FACE_BUTTONS, scrollable_frame
405
+
406
+ ROOT.wm_attributes('-disabled', True)
407
+
408
+ if len(FACE_BUTTONS) > 0:
409
+ for b in FACE_BUTTONS:
410
+ try:
411
+ # b.place_forget()
412
+ b.destroy()
413
+ except:
414
+ continue
415
+ FACE_BUTTONS.clear()
416
+
417
+ for i,face in enumerate(faces):
418
+ image = render_face_from_frame(face[1], (128, 128))
419
+ score = face[0]['det_score']
420
+ age = face[0]['age']
421
+ button_text = f'Score: {score} - Sex: {face[0].sex} - Age: {age}'
422
+ face_button = ctk.CTkButton(scrollable_frame, text=button_text, width=128, height=128, compound='top', anchor='center', command=lambda faceindex=i: select_face(index=faceindex, is_input=is_input))
423
+ face_button.grid(row=0, column=i, pady=5, padx=5)
424
+ face_button.configure(image=image)
425
+ face_button._draw()
426
+ FACE_BUTTONS.append(face_button)
427
+
428
+ FACE_SELECT.deiconify()
429
+
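One detail in show_face_selection worth calling out is the command=lambda faceindex=i: ... idiom: binding the loop variable through a default argument freezes its value per button and avoids Python's late-binding pitfall, where every closure would otherwise see the final value of i. A standalone illustration:

callbacks_late = [lambda: i for i in range(3)]           # all close over the same i
callbacks_bound = [lambda idx=i: idx for i in range(3)]  # default arg captures i at creation time

print([cb() for cb in callbacks_late])    # [2, 2, 2]
print([cb() for cb in callbacks_bound])   # [0, 1, 2]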
roop/utilities.py ADDED
@@ -0,0 +1,243 @@
1
+ import glob
2
+ import mimetypes
3
+ import os
4
+ import platform
5
+ import shutil
6
+ import ssl
7
+ import subprocess
8
+ import sys
9
+ import urllib
10
+ import torch
11
+ import gradio
12
+ import tempfile
13
+
14
+ from pathlib import Path
15
+ from typing import List, Any
16
+ from tqdm import tqdm
17
+ from scipy.spatial import distance
18
+
19
+ import roop.globals
20
+
21
+ TEMP_FILE = 'temp.mp4'
22
+ TEMP_DIRECTORY = 'temp'
23
+
24
+ # monkey patch ssl for mac
25
+ if platform.system().lower() == 'darwin':
26
+ ssl._create_default_https_context = ssl._create_unverified_context
27
+
28
+
29
+ def run_ffmpeg(args: List[str]) -> bool:
30
+ commands = ['ffmpeg', '-hide_banner', '-hwaccel', 'auto', '-y', '-loglevel', roop.globals.log_level]
31
+ commands.extend(args)
32
+ print (" ".join(commands))
33
+ try:
34
+ subprocess.check_output(commands, stderr=subprocess.STDOUT)
35
+ return True
36
+ except Exception:
37
+ pass
38
+ return False
39
+
40
+
41
+ def detect_fps(target_path: str) -> float:
42
+ command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path]
43
+ output = subprocess.check_output(command).decode().strip().split('/')
44
+ try:
45
+ numerator, denominator = map(int, output)
46
+ return numerator / denominator
47
+ except Exception:
48
+ pass
49
+ return 24.0
50
+
51
+ def cut_video(original_video: str, cut_video: str, start_frame: int, end_frame: int):
52
+ fps = detect_fps(original_video)
53
+ start_time = start_frame / fps
54
+ num_frames = end_frame - start_frame
55
+
56
+ run_ffmpeg(['-ss', str(start_time), '-i', original_video, '-c:v', roop.globals.video_encoder, '-c:a', 'aac', '-frames:v', str(num_frames), cut_video])
57
+
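cut_video converts frame indices into ffmpeg arguments: the start frame becomes a seek time in seconds and the frame count becomes -frames:v. A worked example of that arithmetic, assuming a 30 fps clip (detect_fps falls back to 24.0 when the ffprobe output cannot be parsed):

fps = 30.0
start_frame, end_frame = 120, 420
start_time = start_frame / fps           # 4.0  -> passed to ffmpeg as '-ss 4.0'
num_frames = end_frame - start_frame     # 300  -> passed as '-frames:v 300'
print(start_time, num_frames)            # 4.0 300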
58
+ def join_videos(videos: List[str], dest_filename: str):
59
+ inputs = []
60
+ filter = ''
61
+ for i,v in enumerate(videos):
62
+ inputs.append('-i')
63
+ inputs.append(v)
64
+ filter += f'[{i}:v:0][{i}:a:0]'
65
+ run_ffmpeg([" ".join(inputs), '-filter_complex', f'"{filter}concat=n={len(videos)}:v=1:a=1[outv][outa]"', '-map', '"[outv]"', '-map', '"[outa]"', dest_filename])
66
+
67
+ def extract_frames(target_path: str) -> None:
68
+ create_temp(target_path)
69
+ temp_directory_path = get_temp_directory_path(target_path)
70
+ run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, f'%04d.{roop.globals.CFG.output_image_format}')])
71
+ return temp_directory_path
72
+
73
+
74
+ def create_video(target_path: str, dest_filename: str, fps: float = 24.0) -> None:
75
+ temp_directory_path = get_temp_directory_path(target_path)
76
+ run_ffmpeg(['-r', str(fps), '-i', os.path.join(temp_directory_path, f'%04d.{roop.globals.CFG.output_image_format}'), '-c:v', roop.globals.video_encoder, '-crf', str(roop.globals.video_quality), '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', dest_filename])
77
+ return dest_filename
78
+
79
+
80
+ def create_gif_from_video(video_path: str, gif_path):
81
+ from roop.capturer import get_video_frame
82
+
83
+ fps = detect_fps(video_path)
84
+ frame = get_video_frame(video_path)
85
+
86
+ run_ffmpeg(['-i', video_path, '-vf', f'fps={fps},scale={frame.shape[0]}:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse', '-loop', '0', gif_path])
87
+
88
+
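create_gif_from_video uses ffmpeg's single-pass palette trick, splitting the stream so palettegen can build an optimized palette that paletteuse then applies, which gives noticeably better GIF colours than the default dither. Note that NumPy frames are shaped (height, width, channels), so frame.shape[0] above is the frame height; if the intent is to scale to the source width, shape[1] would be the index to use. A small sketch of the filter string being built, with made-up values:

fps, height = 25.0, 720                  # illustrative values; height comes from frame.shape[0]
gif_filter = (f'fps={fps},scale={height}:-1:flags=lanczos,'
              'split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse')
print(gif_filter)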
89
+ def restore_audio(intermediate_video: str, original_video: str, final_video: str) -> None:
90
+ run_ffmpeg(['-i', intermediate_video, '-i', original_video, '-c:v', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-y', final_video])
91
+
92
+
93
+ def get_temp_frame_paths(target_path: str) -> List[str]:
94
+ temp_directory_path = get_temp_directory_path(target_path)
95
+ return glob.glob((os.path.join(glob.escape(temp_directory_path), f'*.{roop.globals.CFG.output_image_format}')))
96
+
97
+
98
+ def get_temp_directory_path(target_path: str) -> str:
99
+ target_name, _ = os.path.splitext(os.path.basename(target_path))
100
+ target_directory_path = os.path.dirname(target_path)
101
+ return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name)
102
+
103
+
104
+ def get_temp_output_path(target_path: str) -> str:
105
+ temp_directory_path = get_temp_directory_path(target_path)
106
+ return os.path.join(temp_directory_path, TEMP_FILE)
107
+
108
+
109
+ def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any:
110
+ if source_path and target_path:
111
+ source_name, _ = os.path.splitext(os.path.basename(source_path))
112
+ target_name, target_extension = os.path.splitext(os.path.basename(target_path))
113
+ if os.path.isdir(output_path):
114
+ return os.path.join(output_path, source_name + '-' + target_name + target_extension)
115
+ return output_path
116
+
117
+
118
+ def get_destfilename_from_path(srcfilepath: str, destfilepath: str, extension: str) -> str:
119
+ fn, ext = os.path.splitext(os.path.basename(srcfilepath))
120
+ if '.' in extension:
121
+ return os.path.join(destfilepath, f'{fn}{extension}')
122
+ return os.path.join(destfilepath, f'{fn}{extension}{ext}')
123
+
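get_destfilename_from_path behaves differently depending on whether extension contains a dot: with a dot it replaces the original extension, without one it is appended as a suffix and the original extension is kept. A self-contained mirror of that logic (POSIX-style paths shown):

import os

def destfilename(srcfilepath: str, destfilepath: str, extension: str) -> str:
    # mirrors get_destfilename_from_path above
    fn, ext = os.path.splitext(os.path.basename(srcfilepath))
    if '.' in extension:
        return os.path.join(destfilepath, f'{fn}{extension}')
    return os.path.join(destfilepath, f'{fn}{extension}{ext}')

print(destfilename('/videos/clip.mp4', './output', '.gif'))   # ./output/clip.gif
print(destfilename('/videos/clip.mp4', './output', '_cut'))   # ./output/clip_cut.mp4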
124
+
125
+
126
+
127
+ def create_temp(target_path: str) -> None:
128
+ temp_directory_path = get_temp_directory_path(target_path)
129
+ Path(temp_directory_path).mkdir(parents=True, exist_ok=True)
130
+
131
+
132
+ def move_temp(target_path: str, output_path: str) -> None:
133
+ temp_output_path = get_temp_output_path(target_path)
134
+ if os.path.isfile(temp_output_path):
135
+ if os.path.isfile(output_path):
136
+ os.remove(output_path)
137
+ shutil.move(temp_output_path, output_path)
138
+
139
+
140
+ def clean_temp(target_path: str) -> None:
141
+ temp_directory_path = get_temp_directory_path(target_path)
142
+ parent_directory_path = os.path.dirname(temp_directory_path)
143
+ if not roop.globals.keep_frames and os.path.isdir(temp_directory_path):
144
+ shutil.rmtree(temp_directory_path)
145
+ if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
146
+ os.rmdir(parent_directory_path)
147
+
148
+
149
+ def has_image_extension(image_path: str) -> bool:
150
+ return image_path.lower().endswith(('png', 'jpg', 'jpeg', 'webp'))
151
+
152
+ def has_extension(filepath: str, extensions: List[str]) -> bool:
153
+ return filepath.lower().endswith(tuple(extensions))
154
+
155
+
156
+ def is_image(image_path: str) -> bool:
157
+ if image_path and os.path.isfile(image_path):
158
+ mimetype, _ = mimetypes.guess_type(image_path)
159
+ return bool(mimetype and mimetype.startswith('image/'))
160
+ return False
161
+
162
+
163
+ def is_video(video_path: str) -> bool:
164
+ if video_path and os.path.isfile(video_path):
165
+ mimetype, _ = mimetypes.guess_type(video_path)
166
+ return bool(mimetype and mimetype.startswith('video/'))
167
+ return False
168
+
169
+
170
+ def conditional_download(download_directory_path: str, urls: List[str]) -> None:
171
+ if not os.path.exists(download_directory_path):
172
+ os.makedirs(download_directory_path)
173
+ for url in urls:
174
+ download_file_path = os.path.join(download_directory_path, os.path.basename(url))
175
+ if not os.path.exists(download_file_path):
176
+ request = urllib.request.urlopen(url) # type: ignore[attr-defined]
177
+ total = int(request.headers.get('Content-Length', 0))
178
+ with tqdm(total=total, desc=f'Downloading {url}', unit='B', unit_scale=True, unit_divisor=1024) as progress:
179
+ urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined]
180
+
181
+
182
+ def resolve_relative_path(path: str) -> str:
183
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
184
+
185
+ def get_device() -> str:
186
+ if 'CUDAExecutionProvider' in roop.globals.execution_providers:
187
+ return 'cuda'
188
+ if 'CoreMLExecutionProvider' in roop.globals.execution_providers:
189
+ return 'mps'
190
+ return 'cpu'
191
+
192
+
193
+ # Taken from https://stackoverflow.com/a/68842705
194
+ def get_platform():
195
+ if sys.platform == 'linux':
196
+ try:
197
+ proc_version = open('/proc/version').read()
198
+ if 'Microsoft' in proc_version:
199
+ return 'wsl'
200
+ except:
201
+ pass
202
+ return sys.platform
203
+
204
+ def open_with_default_app(filename):
205
+ if filename is None:
206
+ return
207
+ platform = get_platform()
208
+ if platform == 'darwin':
209
+ subprocess.call(('open', filename))
210
+ elif platform in ['win64', 'win32']:
211
+ os.startfile(filename.replace('/','\\'))
212
+ elif platform == 'wsl':
213
+ subprocess.call('cmd.exe /C start'.split() + [filename])
214
+ else: # linux variants
215
+ subprocess.call(('xdg-open', filename))
216
+
217
+ def prepare_for_batch(target_files):
218
+ print("Preparing temp files")
219
+ tempfolder = os.path.join(tempfile.gettempdir(), "rooptmp")
220
+ if os.path.exists(tempfolder):
221
+ shutil.rmtree(tempfolder)
222
+ Path(tempfolder).mkdir(parents=True, exist_ok=True)
223
+ for f in target_files:
224
+ newname = os.path.basename(f.name)
225
+ shutil.move(f.name, os.path.join(tempfolder, newname))
226
+ return tempfolder
227
+
228
+
229
+
230
+ def create_version_html():
231
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
232
+ versions_html = f"""
233
+ python: <span title="{sys.version}">{python_version}</span>
234
+
235
+ torch: {getattr(torch, '__long_version__',torch.__version__)}
236
+
237
+ gradio: {gradio.__version__}
238
+ """
239
+ return versions_html
240
+
241
+
242
+ def compute_cosine_distance(emb1, emb2):
243
+ return distance.cosine(emb1, emb2)
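compute_cosine_distance returns 0.0 for identical embeddings and grows toward 2.0 for opposite ones, so lower means more similar; elsewhere in this commit the max_face_distance value is stored as roop.globals.distance_threshold and compared against it. A short usage sketch with an illustrative 0.65 threshold:

import numpy as np
from scipy.spatial import distance

emb_a = np.array([0.10, 0.90, 0.30])
emb_b = np.array([0.12, 0.88, 0.31])
dist = distance.cosine(emb_a, emb_b)     # close to 0.0 for near-identical embeddings
print(dist < 0.65)                       # True -> treat as the same identity under this threshold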